Commit fcc21447 authored by Thomas Zimmermann

drm/udl: Use damage iterator

Use a damage iterator to process damage areas individually. Merging
damage areas can result in large updates of unchanged framebuffer
regions. As USB is rather slow, it's better to process damage areas
individually and hence minimize the amount of USB-transferred data.

As part of the change, move drm_gem_fb_{begin,end}_cpu_access() into
the plane's atomic_update helper. To avoid overhead and intermediate
writers, we want to synchronize buffers and reserve access only once
before copying damage areas of the framebuffer.

v2:
	* clarify commit message (Javier)
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221006095355.23579-11-tzimmermann@suse.de
parent ca2bd373
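
For reference, the pattern the diff below implements can be sketched roughly as follows. This is a minimal, hypothetical illustration rather than the driver code: example_flush_rect() stands in for udl's per-rectangle USB transfer, while the DRM helpers (drm_atomic_helper_damage_iter_init(), drm_atomic_for_each_plane_damage(), drm_gem_fb_{begin,end}_cpu_access()) are the ones the patch actually uses.

/*
 * Minimal sketch of the damage-iterator pattern: reserve CPU access to
 * the framebuffer once, then flush each damage rectangle individually.
 * example_flush_rect() is a hypothetical stand-in for the driver's
 * per-rectangle transfer routine.
 */
#include <linux/dma-direction.h>

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_rect.h>

/* Hypothetical: a real driver would copy and send the rectangle here. */
static void example_flush_rect(struct drm_framebuffer *fb,
			       const struct drm_rect *rect)
{
}

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;

	if (!new_state->fb)
		return; /* no framebuffer; plane is disabled */

	/* Synchronize buffers and reserve access only once per update. */
	if (drm_gem_fb_begin_cpu_access(new_state->fb, DMA_FROM_DEVICE))
		return;

	/*
	 * The iterator yields each damage rectangle separately, so
	 * unchanged regions between rectangles are never transferred.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &damage)
		example_flush_rect(new_state->fb, &damage);

	drm_gem_fb_end_cpu_access(new_state->fb, DMA_FROM_DEVICE);
}

Keeping the begin/end CPU-access calls outside the loop matches the commit's goal of reserving buffer access only once before copying the damage areas.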
@@ -238,15 +238,9 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
 		return ret;
 	log_bpp = ret;
 
-	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
-	if (ret)
-		return ret;
-
 	urb = udl_get_urb(dev);
-	if (!urb) {
-		ret = -ENOMEM;
-		goto out_drm_gem_fb_end_cpu_access;
-	}
+	if (!urb)
+		return -ENOMEM;
 	cmd = urb->transfer_buffer;
 
 	for (i = clip->y1; i < clip->y2; i++) {
@@ -258,7 +252,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
 				       &cmd, byte_offset, dev_byte_offset,
 				       byte_width);
 		if (ret)
-			goto out_drm_gem_fb_end_cpu_access;
+			return ret;
 	}
 
 	if (cmd > (char *)urb->transfer_buffer) {
@@ -272,11 +266,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
 		udl_urb_completion(urb);
 	}
 
-	ret = 0;
-
-out_drm_gem_fb_end_cpu_access:
-	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-	return ret;
+	return 0;
 }
 
 /*
@@ -301,19 +291,29 @@ static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
 	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
 	struct drm_framebuffer *fb = plane_state->fb;
 	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
-	struct drm_rect rect;
-	int idx;
-
-	if (!drm_dev_enter(dev, &idx))
-		return;
+	struct drm_atomic_helper_damage_iter iter;
+	struct drm_rect damage;
+	int ret, idx;
 
 	if (!fb)
 		return; /* no framebuffer; plane is disabled */
 
-	if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &rect))
-		udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
+	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+	if (ret)
+		return;
+
+	if (!drm_dev_enter(dev, &idx))
+		goto out_drm_gem_fb_end_cpu_access;
+
+	drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+	drm_atomic_for_each_plane_damage(&iter, &damage) {
+		udl_handle_damage(fb, &shadow_plane_state->data[0], &damage);
+	}
 
 	drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
 }
 
 static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {