Commit 4fa18345 authored by Michael Hernandez, committed by Martin K. Petersen

scsi: qla2xxx: Utilize pci_alloc_irq_vectors/pci_free_irq_vectors calls.

Replaces the old pci_enable_msi[x]* and pci_disable_msi[x] calls with the pci_alloc_irq_vectors()/pci_free_irq_vectors() API.
Signed-off-by: Michael Hernandez <michael.hernandez@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 77ddb94a
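Not part of the commit: a minimal sketch of the allocation pattern the new API provides, for readers unfamiliar with it. The function and handler names (example_setup_msix, example_handler) and the "example" IRQ name are hypothetical; pci_alloc_irq_vectors(), pci_irq_vector(), pci_free_irq_vectors(), request_irq() and free_irq() are the real kernel interfaces the driver is converted to.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical interrupt handler, for illustration only. */
static irqreturn_t example_handler(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}

static int example_setup_msix(struct pci_dev *pdev, int min_vecs,
                              int max_vecs, void *handle)
{
    int nvec, i, ret;

    /*
     * One call replaces the old kzalloc of a struct msix_entry array
     * plus pci_enable_msix_range(); returns the number of vectors
     * actually allocated, or a negative errno.
     */
    nvec = pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSIX);
    if (nvec < 0)
        return nvec;

    for (i = 0; i < nvec; i++) {
        /* pci_irq_vector() maps a vector index to its Linux IRQ number. */
        ret = request_irq(pci_irq_vector(pdev, i), example_handler, 0,
                          "example", handle);
        if (ret)
            goto err;
    }
    return 0;

err:
    while (--i >= 0)
        free_irq(pci_irq_vector(pdev, i), handle);
    pci_free_irq_vectors(pdev);   /* replaces pci_disable_msix()/msi() */
    return ret;
}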
@@ -2747,7 +2747,7 @@ struct qla_msix_entry {
     int have_irq;
     uint32_t vector;
     uint16_t entry;
-    struct rsp_que *rsp;
+    void *handle;
     struct irq_affinity_notify irq_notify;
     int cpuid;
 };
@@ -3025,52 +3025,17 @@ static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
     { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
 };
 
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
-    int i;
-    struct qla_msix_entry *qentry;
-    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
-    for (i = 0; i < ha->msix_count; i++) {
-        qentry = &ha->msix_entries[i];
-        if (qentry->have_irq) {
-            /* un-register irq cpu affinity notification */
-            irq_set_affinity_notifier(qentry->vector, NULL);
-            free_irq(qentry->vector, qentry->rsp);
-        }
-    }
-    pci_disable_msix(ha->pdev);
-    kfree(ha->msix_entries);
-    ha->msix_entries = NULL;
-    ha->flags.msix_enabled = 0;
-    ql_dbg(ql_dbg_init, vha, 0x0042,
-        "Disabled the MSI.\n");
-}
-
 static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 #define MIN_MSIX_COUNT 2
 #define ATIO_VECTOR 2
     int i, ret;
-    struct msix_entry *entries;
     struct qla_msix_entry *qentry;
     scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-    entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
-            GFP_KERNEL);
-    if (!entries) {
-        ql_log(ql_log_warn, vha, 0x00bc,
-            "Failed to allocate memory for msix_entry.\n");
-        return -ENOMEM;
-    }
-
-    for (i = 0; i < ha->msix_count; i++)
-        entries[i].entry = i;
-
-    ret = pci_enable_msix_range(ha->pdev,
-                    entries, MIN_MSIX_COUNT, ha->msix_count);
+    ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+        PCI_IRQ_MSIX);
     if (ret < 0) {
         ql_log(ql_log_fatal, vha, 0x00c7,
             "MSI-X: Failed to enable support, "
@@ -3097,10 +3062,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 
     for (i = 0; i < ha->msix_count; i++) {
         qentry = &ha->msix_entries[i];
-        qentry->vector = entries[i].vector;
-        qentry->entry = entries[i].entry;
+        qentry->vector = pci_irq_vector(ha->pdev, i);
+        qentry->entry = i;
         qentry->have_irq = 0;
-        qentry->rsp = NULL;
+        qentry->handle = NULL;
         qentry->irq_notify.notify = qla_irq_affinity_notify;
         qentry->irq_notify.release = qla_irq_affinity_release;
         qentry->cpuid = -1;
@@ -3109,7 +3074,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
     /* Enable MSI-X vectors for the base queue */
     for (i = 0; i < 2; i++) {
         qentry = &ha->msix_entries[i];
-        qentry->rsp = rsp;
+        qentry->handle = rsp;
         rsp->msix = qentry;
         if (IS_P3P_TYPE(ha))
             ret = request_irq(qentry->vector,
@@ -3142,7 +3107,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
      */
     if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
         qentry = &ha->msix_entries[ATIO_VECTOR];
-        qentry->rsp = rsp;
+        qentry->handle = rsp;
         rsp->msix = qentry;
         ret = request_irq(qentry->vector,
             qla83xx_msix_entries[ATIO_VECTOR].handler,
@@ -3155,7 +3120,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         ql_log(ql_log_fatal, vha, 0x00cb,
             "MSI-X: unable to register handler -- %x/%d.\n",
             qentry->vector, ret);
-        qla24xx_disable_msix(ha);
+        qla2x00_free_irqs(vha);
         ha->mqenable = 0;
         goto msix_out;
     }
@@ -3177,7 +3142,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
 
 msix_out:
-    kfree(entries);
     return ret;
 }
@@ -3230,7 +3194,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
         !IS_QLA27XX(ha))
         goto skip_msi;
 
-    ret = pci_enable_msi(ha->pdev);
+    ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
     if (!ret) {
         ql_dbg(ql_dbg_init, vha, 0x0038,
             "MSI: Enabled.\n");
@@ -3275,6 +3239,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
     struct qla_hw_data *ha = vha->hw;
     struct rsp_que *rsp;
+    struct qla_msix_entry *qentry;
+    int i;
 
     /*
      * We need to check that ha->rsp_q_map is valid in case we are called
@@ -3284,13 +3250,24 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
         return;
 
     rsp = ha->rsp_q_map[0];
-    if (ha->flags.msix_enabled)
-        qla24xx_disable_msix(ha);
-    else if (ha->flags.msi_enabled) {
-        free_irq(ha->pdev->irq, rsp);
-        pci_disable_msi(ha->pdev);
-    } else
-        free_irq(ha->pdev->irq, rsp);
+    if (ha->flags.msix_enabled) {
+        for (i = 0; i < ha->msix_count; i++) {
+            qentry = &ha->msix_entries[i];
+            if (qentry->have_irq) {
+                irq_set_affinity_notifier(qentry->vector, NULL);
+                free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+            }
+        }
+        kfree(ha->msix_entries);
+        ha->msix_entries = NULL;
+        ha->flags.msix_enabled = 0;
+        ql_dbg(ql_dbg_init, vha, 0x0042,
+            "Disabled MSI-X.\n");
+    } else {
+        free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+    }
+
+    pci_free_irq_vectors(ha->pdev);
 }
@@ -3310,7 +3287,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
         return ret;
     }
     msix->have_irq = 1;
-    msix->rsp = rsp;
+    msix->handle = rsp;
     return ret;
 }
@@ -3323,11 +3300,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
         container_of(notify, struct qla_msix_entry, irq_notify);
     struct qla_hw_data *ha;
     struct scsi_qla_host *base_vha;
+    struct rsp_que *rsp = e->handle;
 
     /* user is recommended to set mask to just 1 cpu */
     e->cpuid = cpumask_first(mask);
 
-    ha = e->rsp->hw;
+    ha = rsp->hw;
     base_vha = pci_get_drvdata(ha->pdev);
 
     ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3351,7 +3329,8 @@ static void qla_irq_affinity_release(struct kref *ref)
         container_of(ref, struct irq_affinity_notify, kref);
     struct qla_msix_entry *e =
         container_of(notify, struct qla_msix_entry, irq_notify);
-    struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+    struct rsp_que *rsp = e->handle;
+    struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
 
     ql_dbg(ql_dbg_init, base_vha, 0xffff,
         "%s: host%ld: vector %d cpu %d \n", __func__,
@@ -542,7 +542,7 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
     if (rsp->msix && rsp->msix->have_irq) {
         free_irq(rsp->msix->vector, rsp);
         rsp->msix->have_irq = 0;
-        rsp->msix->rsp = NULL;
+        rsp->msix->handle = NULL;
     }
     dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
         sizeof(response_t), rsp->ring, rsp->dma);