Commit a2a04b51 authored by Dave Airlie's avatar Dave Airlie

Merge tag 'drm-misc-fixes-2023-02-16' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Multiple fixes in vc4 to address issues with YUV planes, HDMI and CRTC;
an invalid page access fix for fbdev, mark dynamic debug as broken, a
double free and refcounting fix for vmwgfx.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20230216091905.i5wswy4dd74x4br5@houat
parents caa068c9 a950b989
...@@ -53,7 +53,8 @@ config DRM_DEBUG_MM ...@@ -53,7 +53,8 @@ config DRM_DEBUG_MM
config DRM_USE_DYNAMIC_DEBUG config DRM_USE_DYNAMIC_DEBUG
bool "use dynamic debug to implement drm.debug" bool "use dynamic debug to implement drm.debug"
default y default n
depends on BROKEN
depends on DRM depends on DRM
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
depends on JUMP_LABEL depends on JUMP_LABEL
......
...@@ -714,7 +714,7 @@ static int ast_primary_plane_init(struct ast_private *ast) ...@@ -714,7 +714,7 @@ static int ast_primary_plane_init(struct ast_private *ast)
struct ast_plane *ast_primary_plane = &ast->primary_plane; struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base; struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram; void __iomem *vaddr = ast->vram;
u64 offset = ast->vram_base; u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size; unsigned long size = ast->vram_fb_available - cursor_size;
int ret; int ret;
...@@ -972,7 +972,7 @@ static int ast_cursor_plane_init(struct ast_private *ast) ...@@ -972,7 +972,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
return -ENOMEM; return -ENOMEM;
vaddr = ast->vram + ast->vram_fb_available - size; vaddr = ast->vram + ast->vram_fb_available - size;
offset = ast->vram_base + ast->vram_fb_available - size; offset = ast->vram_fb_available - size;
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size, ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs, 0x01, &ast_cursor_plane_funcs,
......
...@@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, ...@@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) { if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000, vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
mode->clock * 9 / 10) * 1000; mode->clock * 9 / 10) * 1000;
} else { } else {
vc4_state->hvs_load = mode->clock * 1000; vc4_state->hvs_load = mode->clock * 1000;
......
...@@ -97,6 +97,10 @@ ...@@ -97,6 +97,10 @@
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8) #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK VC4_MASK(7, 0)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE BIT(0)
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE BIT(4)
# define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0) # define VC4_HD_M_ENABLE BIT(0)
...@@ -1306,7 +1310,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, ...@@ -1306,7 +1310,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTB_VBP)); VC4_HDMI_VERTB_VBP));
unsigned long flags; unsigned long flags;
unsigned char gcp; unsigned char gcp;
bool gcp_en;
u32 reg; u32 reg;
int idx; int idx;
...@@ -1341,16 +1344,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, ...@@ -1341,16 +1344,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
switch (vc4_state->output_bpc) { switch (vc4_state->output_bpc) {
case 12: case 12:
gcp = 6; gcp = 6;
gcp_en = true;
break; break;
case 10: case 10:
gcp = 5; gcp = 5;
gcp_en = true;
break; break;
case 8: case 8:
default: default:
gcp = 4; gcp = 0;
gcp_en = false;
break; break;
} }
...@@ -1359,8 +1359,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, ...@@ -1359,8 +1359,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
* doesn't signal in GCP. * doesn't signal in GCP.
*/ */
if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) { if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
gcp = 4; gcp = 0;
gcp_en = false;
} }
reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1); reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
...@@ -1373,11 +1372,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, ...@@ -1373,11 +1372,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
reg = HDMI_READ(HDMI_GCP_WORD_1); reg = HDMI_READ(HDMI_GCP_WORD_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK; reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1); reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
HDMI_WRITE(HDMI_GCP_WORD_1, reg); HDMI_WRITE(HDMI_GCP_WORD_1, reg);
reg = HDMI_READ(HDMI_GCP_CONFIG); reg = HDMI_READ(HDMI_GCP_CONFIG);
reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE; reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
HDMI_WRITE(HDMI_GCP_CONFIG, reg); HDMI_WRITE(HDMI_GCP_CONFIG, reg);
reg = HDMI_READ(HDMI_MISC_CONTROL); reg = HDMI_READ(HDMI_MISC_CONTROL);
......
...@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) ...@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{ {
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb; struct drm_framebuffer *fb = state->fb;
struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0); struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes; int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state; struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub; u32 h_subsample = fb->format->hsub;
...@@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) ...@@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret) if (ret)
return ret; return ret;
for (i = 0; i < num_planes; i++) for (i = 0; i < num_planes; i++) {
bo = drm_fb_dma_get_gem_obj(fb, i);
vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i]; vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
}
/* /*
* We don't support subpixel source positioning for scaling, * We don't support subpixel source positioning for scaling,
......
...@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw, ...@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
return -ENOMEM; return -ENOMEM;
} }
/*
* vmw_bo_init will delete the *p_bo object if it fails
*/
ret = vmw_bo_init(vmw, *p_bo, size, ret = vmw_bo_init(vmw, *p_bo, size,
placement, interruptible, pin, placement, interruptible, pin,
bo_free); bo_free);
...@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw, ...@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
return ret; return ret;
out_error: out_error:
kfree(*p_bo);
*p_bo = NULL; *p_bo = NULL;
return ret; return ret;
} }
...@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp, ...@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
ttm_bo_put(&vmw_bo->base); ttm_bo_put(&vmw_bo->base);
} }
drm_gem_object_put(&vmw_bo->base.base);
return ret; return ret;
} }
...@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, ...@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo); vmw_bo_unreference(&vbo);
drm_gem_object_put(&vbo->base.base);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY) if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY; return -EBUSY;
...@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, ...@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* struct vmw_buffer_object should be placed. * struct vmw_buffer_object should be placed.
* Return: Zero on success, Negative error code on error. * Return: Zero on success, Negative error code on error.
* *
* The vmw buffer object pointer will be refcounted. * The vmw buffer object pointer will be refcounted (both ttm and gem)
*/ */
int vmw_user_bo_lookup(struct drm_file *filp, int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle, uint32_t handle,
...@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp, ...@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
*out = gem_to_vmw_bo(gobj); *out = gem_to_vmw_bo(gobj);
ttm_bo_get(&(*out)->base); ttm_bo_get(&(*out)->base);
drm_gem_object_put(gobj);
return 0; return 0;
} }
...@@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv, ...@@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle, args->size, &args->handle,
&vbo); &vbo);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->base.base);
return ret; return ret;
} }
......
...@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
} }
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
ttm_bo_put(&vmw_bo->base); ttm_bo_put(&vmw_bo->base);
drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
} }
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
ttm_bo_put(&vmw_bo->base); ttm_bo_put(&vmw_bo->base);
drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
......
...@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, ...@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
&vmw_sys_placement : &vmw_sys_placement :
&vmw_vram_sys_placement, &vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo); true, false, &vmw_gem_destroy, p_vbo);
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
if (ret != 0) if (ret != 0)
goto out_no_bo; goto out_no_bo;
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle); ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&(*p_vbo)->base.base);
out_no_bo: out_no_bo:
return ret; return ret;
} }
...@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, ...@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle; rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0; rep->cur_gmr_offset = 0;
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->base.base);
out_no_bo: out_no_bo:
return ret; return ret;
} }
......
...@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, ...@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out: err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */ /* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) if (bo) {
vmw_bo_unreference(&bo); vmw_bo_unreference(&bo);
drm_gem_object_put(&bo->base.base);
}
if (surface) if (surface)
vmw_surface_unreference(&surface); vmw_surface_unreference(&surface);
......
...@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ...@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf); vmw_bo_unreference(&buf);
drm_gem_object_put(&buf->base.base);
out_unlock: out_unlock:
mutex_unlock(&overlay->mutex); mutex_unlock(&overlay->mutex);
......
...@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, ...@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle); num_output_sig, tfile, shader_handle);
out_bad_arg: out_bad_arg:
vmw_bo_unreference(&buffer); vmw_bo_unreference(&buffer);
drm_gem_object_put(&buffer->base.base);
return ret; return ret;
} }
......
...@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) ...@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base); container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res; struct vmw_resource *res = &user_srf->srf.res;
if (base->shareable && res && res->backup) if (res && res->backup)
drm_gem_object_put(&res->backup->base.base); drm_gem_object_put(&res->backup->base.base);
*p_base = NULL; *p_base = NULL;
...@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, ...@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock; goto out_unlock;
} }
vmw_bo_reference(res->backup); vmw_bo_reference(res->backup);
drm_gem_object_get(&res->backup->base.base); /*
* We don't expose the handle to the userspace and surface
* already holds a gem reference
*/
drm_gem_handle_delete(file_priv, backup_handle);
} }
tmp = vmw_resource_reference(&srf->res); tmp = vmw_resource_reference(&srf->res);
...@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, ...@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node); drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size; rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle; rep->buffer_handle = backup_handle;
if (user_srf->prime.base.shareable)
drm_gem_object_get(&res->backup->base.base);
} else { } else {
rep->buffer_map_handle = 0; rep->buffer_map_handle = 0;
rep->buffer_size = 0; rep->buffer_size = 0;
......
...@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info, ...@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info,
} }
EXPORT_SYMBOL_GPL(fb_deferred_io_open); EXPORT_SYMBOL_GPL(fb_deferred_io_open);
void fb_deferred_io_cleanup(struct fb_info *info) void fb_deferred_io_release(struct fb_info *info)
{ {
struct fb_deferred_io *fbdefio = info->fbdefio; struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page; struct page *page;
...@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info) ...@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page = fb_deferred_io_page(info, i); page = fb_deferred_io_page(info, i);
page->mapping = NULL; page->mapping = NULL;
} }
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
fb_deferred_io_release(info);
kvfree(info->pagerefs); kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock); mutex_destroy(&fbdefio->lock);
......
...@@ -1454,6 +1454,10 @@ __releases(&info->lock) ...@@ -1454,6 +1454,10 @@ __releases(&info->lock)
struct fb_info * const info = file->private_data; struct fb_info * const info = file->private_data;
lock_fb_info(info); lock_fb_info(info);
#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
if (info->fbdefio)
fb_deferred_io_release(info);
#endif
if (info->fbops->fb_release) if (info->fbops->fb_release)
info->fbops->fb_release(info,1); info->fbops->fb_release(info,1);
module_put(info->fbops->owner); module_put(info->fbops->owner);
......
...@@ -662,6 +662,7 @@ extern int fb_deferred_io_init(struct fb_info *info); ...@@ -662,6 +662,7 @@ extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info, extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode, struct inode *inode,
struct file *file); struct file *file);
extern void fb_deferred_io_release(struct fb_info *info);
extern void fb_deferred_io_cleanup(struct fb_info *info); extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start, extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync); loff_t end, int datasync);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment