Commit 84d7d472 authored by Maxime Ripard's avatar Maxime Ripard

drm/vc4: Use devm_drm_dev_alloc

We can simplify the bind code, its error path, and unbind a bit by using
the managed devm_drm_dev_alloc function.
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20201029190104.2181730-5-maxime@cerno.tech
parent 88e08589
...@@ -449,7 +449,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, ...@@ -449,7 +449,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
} }
if (IS_ERR(cma_obj)) { if (IS_ERR(cma_obj)) {
struct drm_printer p = drm_info_printer(vc4->dev->dev); struct drm_printer p = drm_info_printer(vc4->base.dev);
DRM_ERROR("Failed to allocate from CMA:\n"); DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_print(&p, vc4); vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -590,7 +590,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work) ...@@ -590,7 +590,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
{ {
struct vc4_dev *vc4 = struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, bo_cache.time_work); container_of(work, struct vc4_dev, bo_cache.time_work);
struct drm_device *dev = vc4->dev; struct drm_device *dev = &vc4->base;
mutex_lock(&vc4->bo_lock); mutex_lock(&vc4->bo_lock);
vc4_bo_cache_free_old(dev); vc4_bo_cache_free_old(dev);
......
...@@ -257,41 +257,37 @@ static int vc4_drm_bind(struct device *dev) ...@@ -257,41 +257,37 @@ static int vc4_drm_bind(struct device *dev)
dev->coherent_dma_mask = DMA_BIT_MASK(32); dev->coherent_dma_mask = DMA_BIT_MASK(32);
vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
if (!vc4)
return -ENOMEM;
/* If VC4 V3D is missing, don't advertise render nodes. */ /* If VC4 V3D is missing, don't advertise render nodes. */
node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL); node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
if (!node || !of_device_is_available(node)) if (!node || !of_device_is_available(node))
vc4_drm_driver.driver_features &= ~DRIVER_RENDER; vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
of_node_put(node); of_node_put(node);
drm = drm_dev_alloc(&vc4_drm_driver, dev); vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
if (IS_ERR(drm)) if (IS_ERR(vc4))
return PTR_ERR(drm); return PTR_ERR(vc4);
drm = &vc4->base;
platform_set_drvdata(pdev, drm); platform_set_drvdata(pdev, drm);
vc4->dev = drm;
drm->dev_private = vc4;
INIT_LIST_HEAD(&vc4->debugfs_list); INIT_LIST_HEAD(&vc4->debugfs_list);
mutex_init(&vc4->bin_bo_lock); mutex_init(&vc4->bin_bo_lock);
ret = vc4_bo_cache_init(drm); ret = vc4_bo_cache_init(drm);
if (ret) if (ret)
goto dev_put; return ret;
ret = drmm_mode_config_init(drm); ret = drmm_mode_config_init(drm);
if (ret) if (ret)
goto dev_put; return ret;
ret = vc4_gem_init(drm); ret = vc4_gem_init(drm);
if (ret) if (ret)
goto dev_put; return ret;
ret = component_bind_all(dev, drm); ret = component_bind_all(dev, drm);
if (ret) if (ret)
goto dev_put; return ret;
ret = vc4_plane_create_additional_planes(drm); ret = vc4_plane_create_additional_planes(drm);
if (ret) if (ret)
...@@ -316,8 +312,7 @@ static int vc4_drm_bind(struct device *dev) ...@@ -316,8 +312,7 @@ static int vc4_drm_bind(struct device *dev)
unbind_all: unbind_all:
component_unbind_all(dev, drm); component_unbind_all(dev, drm);
dev_put:
drm_dev_put(drm);
return ret; return ret;
} }
...@@ -332,8 +327,6 @@ static void vc4_drm_unbind(struct device *dev) ...@@ -332,8 +327,6 @@ static void vc4_drm_unbind(struct device *dev)
drm_atomic_private_obj_fini(&vc4->load_tracker); drm_atomic_private_obj_fini(&vc4->load_tracker);
drm_atomic_private_obj_fini(&vc4->ctm_manager); drm_atomic_private_obj_fini(&vc4->ctm_manager);
drm_dev_put(drm);
} }
static const struct component_master_ops vc4_drm_ops = { static const struct component_master_ops vc4_drm_ops = {
......
...@@ -72,7 +72,7 @@ struct vc4_perfmon { ...@@ -72,7 +72,7 @@ struct vc4_perfmon {
}; };
struct vc4_dev { struct vc4_dev {
struct drm_device *dev; struct drm_device base;
struct vc4_hvs *hvs; struct vc4_hvs *hvs;
struct vc4_v3d *v3d; struct vc4_v3d *v3d;
...@@ -235,7 +235,7 @@ struct vc4_dev { ...@@ -235,7 +235,7 @@ struct vc4_dev {
static inline struct vc4_dev * static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev) to_vc4_dev(struct drm_device *dev)
{ {
return (struct vc4_dev *)dev->dev_private; return container_of(dev, struct vc4_dev, base);
} }
struct vc4_bo { struct vc4_bo {
......
...@@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work) ...@@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work)
struct vc4_dev *vc4 = struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, hangcheck.reset_work); container_of(work, struct vc4_dev, hangcheck.reset_work);
vc4_save_hang_state(vc4->dev); vc4_save_hang_state(&vc4->base);
vc4_reset(vc4->dev); vc4_reset(&vc4->base);
} }
static void static void
vc4_hangcheck_elapsed(struct timer_list *t) vc4_hangcheck_elapsed(struct timer_list *t)
{ {
struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer); struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
struct drm_device *dev = vc4->dev; struct drm_device *dev = &vc4->base;
uint32_t ct0ca, ct1ca; uint32_t ct0ca, ct1ca;
unsigned long irqflags; unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec; struct vc4_exec_info *bin_exec, *render_exec;
...@@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4) ...@@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
list_del(&exec->head); list_del(&exec->head);
spin_unlock_irqrestore(&vc4->job_lock, irqflags); spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_complete_exec(vc4->dev, exec); vc4_complete_exec(&vc4->base, exec);
spin_lock_irqsave(&vc4->job_lock, irqflags); spin_lock_irqsave(&vc4->job_lock, irqflags);
} }
...@@ -1258,7 +1258,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, ...@@ -1258,7 +1258,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
fail: fail:
vc4_complete_exec(vc4->dev, exec); vc4_complete_exec(&vc4->base, exec);
return ret; return ret;
} }
......
...@@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev) ...@@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{ {
struct drm_device *dev = vc4->dev; struct drm_device *dev = &vc4->base;
unsigned long irqflags; unsigned long irqflags;
int slot; int slot;
uint64_t seqno = 0; uint64_t seqno = 0;
...@@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4) ...@@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&list);
while (true) { while (true) {
struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true, struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
VC4_BO_TYPE_BIN); VC4_BO_TYPE_BIN);
if (IS_ERR(bo)) { if (IS_ERR(bo)) {
...@@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev) ...@@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
struct vc4_v3d *v3d = dev_get_drvdata(dev); struct vc4_v3d *v3d = dev_get_drvdata(dev);
struct vc4_dev *vc4 = v3d->vc4; struct vc4_dev *vc4 = v3d->vc4;
vc4_irq_uninstall(vc4->dev); vc4_irq_uninstall(&vc4->base);
clk_disable_unprepare(v3d->clk); clk_disable_unprepare(v3d->clk);
...@@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev) ...@@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev)
if (ret != 0) if (ret != 0)
return ret; return ret;
vc4_v3d_init_hw(vc4->dev); vc4_v3d_init_hw(&vc4->base);
/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
enable_irq(vc4->dev->irq); enable_irq(vc4->base.irq);
vc4_irq_postinstall(vc4->dev); vc4_irq_postinstall(&vc4->base);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment