Commit c53a1aee authored by Dmitry Baryshkov, committed by Rob Clark

drm/msm/dpu: move resource allocation to the _probe function

To let the probe function bail early if any of the resources is
unavailable, move resource allocation from kms_init directly to the
probe callback. While we are at it, replace irq_of_parse_and_map() with
platform_get_irq().
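
For reference, the two IRQ helpers report failure differently, which is why the error
check changes from "if (!irq)" to "if (irq < 0)". A minimal sketch with a hypothetical
foo_probe() (not taken from this patch):

#include <linux/of_irq.h>
#include <linux/platform_device.h>

/* Hypothetical probe callback illustrating the two IRQ lookup styles. */
static int foo_probe(struct platform_device *pdev)
{
	int irq;

	/*
	 * Old style: irq_of_parse_and_map() returns 0 on failure, so the
	 * caller has to make up an error code (-EINVAL in the old DPU code).
	 */
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	/*
	 * New style: platform_get_irq() returns a negative errno on failure
	 * (including -EPROBE_DEFER), which can be propagated directly.
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return 0;
}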

This also drops the devm_iounmap() calls. It is too early to have them in
_dpu_kms_hw_destroy() (or it will break if for some reason the DPU device is
rebound into the composite device) and it doesn't make sense to have
them in dpu_dev_remove() (as everything will be torn down by devres
anyway after the device is unbound from the driver).
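
For reference, devres-managed mappings are released by the driver core right after the
driver's remove callback returns, so no explicit unmap is needed anywhere in the driver.
A minimal generic sketch of that lifetime, using the stock devm_platform_ioremap_resource()
helper rather than the msm_ioremap() wrappers, with a hypothetical foo driver:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *mmio;

	/* devres-backed mapping: owned by the device, no iounmap needed. */
	mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	/* A real driver would stash mmio in its driver data here. */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/*
	 * Nothing to unmap here: devres tears the mapping down right after
	 * this callback returns, when the device is unbound from the driver.
	 */
	return 0;
}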
Reviewed-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Patchwork: https://patchwork.freedesktop.org/patch/561629/
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent a2ab5d5b
@@ -389,8 +389,7 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
struct device *dpu_dev = &dpu_kms->pdev->dev;
path0 = msm_icc_get(dpu_dev, "mdp0-mem");
path1 = msm_icc_get(dpu_dev, "mdp1-mem");
@@ -829,21 +828,9 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_kms->catalog = NULL;
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
if (dpu_kms->vbif[VBIF_RT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL;
if (dpu_kms->hw_mdp)
dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL;
}
static void dpu_kms_destroy(struct msm_kms *kms)
@@ -1079,30 +1066,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
atomic_set(&dpu_kms->bandwidth_ref, 0);
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
if (IS_ERR(dpu_kms->mmio)) {
rc = PTR_ERR(dpu_kms->mmio);
DPU_ERROR("mdp register memory map failed: %d\n", rc);
dpu_kms->mmio = NULL;
goto error;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
DPU_ERROR("vbif register memory map failed: %d\n", rc);
dpu_kms->vbif[VBIF_RT] = NULL;
goto error;
}
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
}
dpu_kms_parse_data_bus_icc_path(dpu_kms);
rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
if (rc < 0)
goto error;
@@ -1241,33 +1204,11 @@ static int dpu_kms_init(struct drm_device *ddev)
struct msm_drm_private *priv = ddev->dev_private;
struct device *dev = ddev->dev;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms;
int irq;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct dev_pm_opp *opp;
int ret = 0;
unsigned long max_freq = ULONG_MAX;
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
ret = devm_pm_opp_set_clkname(dev, "core");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
dev_err(dev, "invalid OPP table in device tree\n");
return ret;
}
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
if (ret < 0) {
DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
return ret;
}
dpu_kms->num_clocks = ret;
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
if (!IS_ERR(opp))
dev_pm_opp_put(opp);
@@ -1280,26 +1221,74 @@ static int dpu_kms_init(struct drm_device *ddev)
return ret;
}
dpu_kms->dev = ddev;
dpu_kms->pdev = pdev;
pm_runtime_enable(&pdev->dev);
dpu_kms->rpm_enabled = true;
priv->kms = &dpu_kms->base;
irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
if (!irq) {
DPU_ERROR("failed to get irq\n");
return -EINVAL;
}
dpu_kms->base.irq = irq;
return 0;
}
static int dpu_dev_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, dpu_kms_init, NULL);
struct device *dev = &pdev->dev;
struct dpu_kms *dpu_kms;
int irq;
int ret = 0;
dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
dpu_kms->pdev = pdev;
ret = devm_pm_opp_set_clkname(dev, "core");
if (ret)
return ret;
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV)
return dev_err_probe(dev, ret, "invalid OPP table in device tree\n");
ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to parse clocks\n");
dpu_kms->num_clocks = ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return dev_err_probe(dev, irq, "failed to get irq\n");
dpu_kms->base.irq = irq;
dpu_kms->mmio = msm_ioremap(pdev, "mdp");
if (IS_ERR(dpu_kms->mmio)) {
ret = PTR_ERR(dpu_kms->mmio);
DPU_ERROR("mdp register memory map failed: %d\n", ret);
dpu_kms->mmio = NULL;
return ret;
}
DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
DPU_ERROR("vbif register memory map failed: %d\n", ret);
dpu_kms->vbif[VBIF_RT] = NULL;
return ret;
}
dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
dpu_kms->vbif[VBIF_NRT] = NULL;
DPU_DEBUG("VBIF NRT is not defined");
}
ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
if (ret)
return ret;
return msm_drv_probe(&pdev->dev, dpu_kms_init, &dpu_kms->base);
}
static int dpu_dev_remove(struct platform_device *pdev)
......