Commit 298799a2 authored by Zack Rusin

drm/vmwgfx: Fix gem refcounting and memory evictions

v2: Add the last part of the ref count fix, spotted by Philipp
Sieweck: the ref count of cpu writers was off when bo waits returned
ERESTARTSYS or EBUSY.

The initial GEM port broke refcounting on shareable (prime) surfaces and
memory evictions. The prime surfaces broke because the parent surfaces
weren't increasing the ref count on GEM surfaces, which meant that
the memory backing textures could have been deleted while the texture
was still accessible. The evictions broke due to a typo: the code was
supposed to exit if the passed buffers were *not* vmw_buffer_object's,
not if they were. The two are tied because the evictions depend on
having memory to actually evict.
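
As a minimal illustration of the eviction typo, here is a hypothetical,
self-contained sketch; only bo_is_vmw() and the control flow mirror the
driver, the stand-in types and printouts are not the real vmwgfx code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the TTM buffer object; only the destroy callback
     * matters for the type check. */
    struct ttm_buffer_object {
            void (*destroy)(struct ttm_buffer_object *bo);
    };

    static void vmw_bo_bo_free(struct ttm_buffer_object *bo) { (void)bo; }
    static void vmw_gem_destroy(struct ttm_buffer_object *bo) { (void)bo; }

    static bool bo_is_vmw(struct ttm_buffer_object *bo)
    {
            return bo->destroy == &vmw_bo_bo_free ||
                   bo->destroy == &vmw_gem_destroy;
    }

    static void swap_notify_buggy(struct ttm_buffer_object *bo)
    {
            if (bo_is_vmw(bo))      /* typo: bails out for every vmw bo */
                    return;
            printf("killing cached kernel maps before swapout\n");
    }

    static void swap_notify_fixed(struct ttm_buffer_object *bo)
    {
            if (!bo_is_vmw(bo))     /* fixed: skip only foreign bos */
                    return;
            printf("killing cached kernel maps before swapout\n");
    }

    int main(void)
    {
            struct ttm_buffer_object bo = { .destroy = vmw_bo_bo_free };
            swap_notify_buggy(&bo); /* prints nothing: eviction work skipped */
            swap_notify_fixed(&bo); /* prints the message: work runs */
            return 0;
    }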

This fixes crashes with the XA state tracker, which is used for xrender
acceleration on xf86-video-vmware, with apps/tests that use a lot of
memory (a good test being piglit's streaming-texture-leak), and with
desktops.
Signed-off-by: Zack Rusin <zackr@vmware.com>
Fixes: 8afa13a0 ("drm/vmwgfx: Implement DRIVER_GEM")
Reported-by: Philipp Sieweck <psi@informatik.uni-kiel.de>
Cc: <stable@vger.kernel.org> # v5.17+
Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220420040328.1007409-1-zack@kde.org
parent 4dee8eeb
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
 	return container_of(bo, struct vmw_buffer_object, base);
 }
 
+/**
+ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * @bo: ttm buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+static bool bo_is_vmw(struct ttm_buffer_object *bo)
+{
+	return bo->destroy == &vmw_bo_bo_free ||
+	       bo->destroy == &vmw_gem_destroy;
+}
+
 /**
  * vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
 		vmw_bo_unreference(&vbo);
-		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
-			     ret != -EBUSY)) {
+		if (unlikely(ret != 0)) {
+			if (ret == -ERESTARTSYS || ret == -EBUSY)
+				return -EBUSY;
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 				  (unsigned int) arg->handle);
 			return ret;
@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
 	/* Is @bo embedded in a struct vmw_buffer_object? */
-	if (vmw_bo_is_vmw_bo(bo))
+	if (!bo_is_vmw(bo))
 		return;
 
 	/* Kill any cached kernel maps before swapout */
@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	struct vmw_buffer_object *vbo;
 
 	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
-	if (vmw_bo_is_vmw_bo(bo))
+	if (!bo_is_vmw(bo))
 		return;
 
 	vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
 		vmw_resource_unbind_list(vbo);
 }
-
-/**
- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
- * @bo: buffer object to be checked
- *
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
- */
-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
-{
-	if (bo->destroy == &vmw_bo_bo_free ||
-	    bo->destroy == &vmw_gem_destroy)
-		return true;
-
-	return false;
-}
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
 		goto out_no_fman;
 	}
 
-	drm_vma_offset_manager_init(&dev_priv->vma_manager,
-				    DRM_FILE_PAGE_OFFSET_START,
-				    DRM_FILE_PAGE_OFFSET_SIZE);
 	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
 			      dev_priv->drm.dev,
 			      dev_priv->drm.anon_inode->i_mapping,
-			      &dev_priv->vma_manager,
+			      dev_priv->drm.vma_offset_manager,
 			      dev_priv->map_mode == vmw_dma_alloc_coherent,
 			      false);
 	if (unlikely(ret != 0)) {
@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
 	vmw_devcaps_destroy(dev_priv);
 	vmw_vram_manager_fini(dev_priv);
 	ttm_device_fini(&dev_priv->bdev);
-	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
 	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
 	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
 
 	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
-				     &dev_priv->vma_manager);
+				     dev_priv->drm.vma_offset_manager);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	    container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
+	if (base->shareable && res && res->backup)
+		drm_gem_object_put(&res->backup->base.base);
+
 	*p_base = NULL;
 	vmw_resource_unreference(&res);
 }
@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			goto out_unlock;
 		}
 		vmw_bo_reference(res->backup);
+		drm_gem_object_get(&res->backup->base.base);
 	}
 
 	tmp = vmw_resource_reference(&srf->res);
@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 						 &res->backup);
 		if (ret == 0)
 			vmw_bo_reference(res->backup);
-
 	}
 
 	if (unlikely(ret != 0)) {
@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
 		rep->buffer_size = res->backup->base.base.size;
 		rep->buffer_handle = backup_handle;
+		if (user_srf->prime.base.shareable)
+			drm_gem_object_get(&res->backup->base.base);
 	} else {
 		rep->buffer_map_handle = 0;
 		rep->buffer_size = 0;