Commit 14fd06c7 authored by Thomas Gleixner

irqchip: Convert all platform MSI users to the new API

Switch all users of the platform MSI domain over to the new
interfaces, which branch to the original platform MSI functions when the
irqdomain associated with the caller's device does not yet provide MSI parent
functionality.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240127161753.114685-7-apatel@ventanamicro.com
parent c88f9110
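For orientation, here is a minimal sketch of the conversion pattern applied by the hunks below, using a hypothetical platform driver "foo" (driver and callback names are illustrative, not taken from this commit). Allocation moves from platform_msi_domain_alloc_irqs() to platform_device_msi_init_and_alloc_irqs(), teardown from platform_msi_domain_free_irqs() to platform_device_msi_free_irqs_all(); the write_msg callback signature is unchanged.

#include <linux/msi.h>
#include <linux/platform_device.h>

/* Illustrative only: program the device's MSI address/data registers from @msg. */
static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* device specific register writes go here */
}

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Was: platform_msi_domain_alloc_irqs(&pdev->dev, 1, foo_write_msi_msg); */
	ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1,
						      foo_write_msi_msg);
	if (ret)
		return ret;

	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* Was: platform_msi_domain_free_irqs(&pdev->dev); */
	platform_device_msi_free_irqs_all(&pdev->dev);
}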
@@ -747,7 +747,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	if (IS_ERR(xor_dev->clk))
 		return PTR_ERR(xor_dev->clk);
 
-	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
+	ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1,
 					     mv_xor_v2_set_msi_msg);
 	if (ret)
 		return ret;
@@ -851,7 +851,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
 			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
 free_msi_irqs:
-	platform_msi_domain_free_irqs(&pdev->dev);
+	platform_device_msi_free_irqs_all(&pdev->dev);
 	return ret;
 }
@@ -867,7 +867,7 @@ static void mv_xor_v2_remove(struct platform_device *pdev)
 	devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);
 
-	platform_msi_domain_free_irqs(&pdev->dev);
+	platform_device_msi_free_irqs_all(&pdev->dev);
 
 	tasklet_kill(&xor_dev->irq_tasklet);
 }
@@ -696,7 +696,7 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
 			devm_free_irq(dev, virq, &dmadev->lldev);
 	}
 
-	platform_msi_domain_free_irqs(dev);
+	platform_device_msi_free_irqs_all(dev);
 #endif
 }
@@ -706,7 +706,7 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
 #ifdef CONFIG_GENERIC_MSI_IRQ
 	int rc, i, virq;
 
-	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+	rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
 					    hidma_write_msi_msg);
 	if (rc)
 		return rc;
@@ -3125,7 +3125,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
 static void arm_smmu_free_msis(void *data)
 {
 	struct device *dev = data;
-	platform_msi_domain_free_irqs(dev);
+
+	platform_device_msi_free_irqs_all(dev);
 }
 
 static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -3166,7 +3167,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
 	}
 
 	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
-	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+	ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
 	if (ret) {
 		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
 		return;
@@ -1587,7 +1587,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
 	}
 
 	/* Allocate platform MSIs for each ring */
-	ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
+	ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
 					     flexrm_mbox_msi_write);
 	if (ret)
 		goto fail_destroy_cmpl_pool;
@@ -1641,7 +1641,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
 fail_free_debugfs_root:
 	debugfs_remove_recursive(mbox->root);
-	platform_msi_domain_free_irqs(dev);
+	platform_device_msi_free_irqs_all(dev);
 fail_destroy_cmpl_pool:
 	dma_pool_destroy(mbox->cmpl_pool);
 fail_destroy_bd_pool:
@@ -1657,7 +1657,7 @@ static void flexrm_mbox_remove(struct platform_device *pdev)
 	debugfs_remove_recursive(mbox->root);
 
-	platform_msi_domain_free_irqs(dev);
+	platform_device_msi_free_irqs_all(dev);
 
 	dma_pool_destroy(mbox->cmpl_pool);
 	dma_pool_destroy(mbox->bd_pool);
@@ -716,7 +716,7 @@ static void smmu_pmu_free_msis(void *data)
 {
 	struct device *dev = data;
 
-	platform_msi_domain_free_irqs(dev);
+	platform_device_msi_free_irqs_all(dev);
 }
 
 static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -746,7 +746,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
 	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
 		return;
 
-	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+	ret = platform_device_msi_init_and_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
 	if (ret) {
 		dev_warn(dev, "failed to allocate MSIs\n");
 		return;
@@ -1712,7 +1712,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 	 * 2. Poll queues do not need ESI.
 	 */
 	nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
-	ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
+	ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
 					     ufs_qcom_write_msi_msg);
 	if (ret) {
 		dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
@@ -1742,7 +1742,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 			devm_free_irq(hba->dev, desc->irq, hba);
 		}
 		msi_unlock_descs(hba->dev);
-		platform_msi_domain_free_irqs(hba->dev);
+		platform_device_msi_free_irqs_all(hba->dev);
 	} else {
 		if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
 		    host->hw_ver.step == 0)
@@ -1818,7 +1818,7 @@ static void ufs_qcom_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(&(pdev)->dev);
 	ufshcd_remove(hba);
-	platform_msi_domain_free_irqs(hba->dev);
+	platform_device_msi_free_irqs_all(hba->dev);
 }
 
 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {