Commit 27e248c4 authored by Dave Airlie

Merge branch 'vmwgfx-fixes-5.2' of git://people.freedesktop.org/~thomash/linux into drm-fixes

A set of miscellaneous fixes for issues that have surfaced recently.
All are Cc'd to stable except the DMA iterator fix, which shouldn't cause
any real issues on older kernels.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: "Thomas Hellstrom (VMware)" <thomas@shipmail.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190522115408.33185-1-thomas@shipmail.org
parents a188339c 5ed7f4b5
@@ -174,7 +174,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	kref_init(&base->refcount);
 	idr_preload(GFP_KERNEL);
 	spin_lock(&tdev->object_lock);
-	ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
+	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
 	spin_unlock(&tdev->object_lock);
 	idr_preload_end();
 	if (ret < 0)
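
The hunk above only raises the lowest id that idr_alloc() may hand out from 0 to 1, so a TTM base object can never be given handle 0 and that value stays usable as a "no object" sentinel. As a rough, stand-alone sketch of the allocation pattern (the example_idr and example_alloc names are made up for illustration and are not part of the patch):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

/* Allocate an id for @obj; id 0 is never returned because start is 1. */
static int example_alloc(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	/* start = 1, end = 0: any id >= 1, no upper bound. */
	id = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);
	idr_preload_end();

	return id;	/* new id on success, negative errno on failure */
}
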
@@ -1239,7 +1239,13 @@ static int vmw_master_set(struct drm_device *dev,
 	}
 
 	dev_priv->active_master = vmaster;
-	drm_sysfs_hotplug_event(dev);
+
+	/*
+	 * Inform a new master that the layout may have changed while
+	 * it was gone.
+	 */
+	if (!from_open)
+		drm_sysfs_hotplug_event(dev);
 
 	return 0;
 }
@@ -296,7 +296,7 @@ struct vmw_sg_table {
 struct vmw_piter {
 	struct page **pages;
 	const dma_addr_t *addrs;
-	struct sg_page_iter iter;
+	struct sg_dma_page_iter iter;
 	unsigned long i;
 	unsigned long num_pages;
 	bool (*next)(struct vmw_piter *);
@@ -2010,6 +2010,11 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		return 0;
 
 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
+		/*
+		 * This is the compat shader path - Per device guest-backed
+		 * shaders, but user-space thinks it's per context host-
+		 * backed shaders.
+		 */
 		res = vmw_shader_lookup(vmw_context_res_man(ctx),
 					cmd->body.shid, cmd->body.type);
 		if (!IS_ERR(res)) {
@@ -2017,6 +2022,14 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 						VMW_RES_DIRTY_NONE);
 			if (unlikely(ret != 0))
 				return ret;
+
+			ret = vmw_resource_relocation_add
+				(sw_context, res,
+				 vmw_ptr_diff(sw_context->buf_start,
+					      &cmd->body.shid),
+				 vmw_res_rel_normal);
+			if (unlikely(ret != 0))
+				return ret;
 		}
 	}
 
@@ -2193,7 +2206,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 	cmd = container_of(header, typeof(*cmd), header);
 
-	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
 		VMW_DEBUG_USER("Illegal shader type %u.\n",
 			       (unsigned int) cmd->body.type);
 		return -EINVAL;
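
The hunk above tightens the validation so that shader types below the minimum are rejected as well as types at or above the DX10 maximum. A generic sketch of this both-ends range check on a user-supplied enum (the shader_type enum and validate_shader_type() below are illustrative only, not the driver's definitions):

#include <errno.h>

enum shader_type {
	SHADER_TYPE_MIN = 1,	/* assumed lower bound for the example */
	SHADER_TYPE_VS = 1,
	SHADER_TYPE_PS,
	SHADER_TYPE_GS,
	SHADER_TYPE_DX10_MAX,	/* first value past the valid range */
};

/* Reject values outside [SHADER_TYPE_MIN, SHADER_TYPE_DX10_MAX). */
static int validate_shader_type(unsigned int type)
{
	if (type >= SHADER_TYPE_DX10_MAX || type < SHADER_TYPE_MIN)
		return -EINVAL;
	return 0;
}
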
@@ -2414,6 +2428,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
 		return -EINVAL;
 
 	cmd = container_of(header, typeof(*cmd), header);
+	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+		VMW_DEBUG_USER("Invalid surface id.\n");
+		return -EINVAL;
+	}
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				VMW_RES_DIRTY_NONE, user_surface_converter,
 				&cmd->sid, &srf);
@@ -266,7 +266,9 @@ static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
 static bool __vmw_piter_sg_next(struct vmw_piter *viter)
 {
-	return __sg_page_iter_next(&viter->iter);
+	bool ret = __vmw_piter_non_sg_next(viter);
+
+	return __sg_page_iter_dma_next(&viter->iter) && ret;
 }
@@ -284,12 +286,6 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
 	return viter->pages[viter->i];
 }
 
-static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
-{
-	return sg_page_iter_page(&viter->iter);
-}
-
 /**
  * Helper functions to return the DMA address of the current page.
  *
@@ -311,13 +307,7 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
 static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
 {
-	/*
-	 * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
-	 * needs revision. See
-	 * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
-	 */
-	return sg_page_iter_dma_address(
-		container_of(&viter->iter, struct sg_dma_page_iter, base));
+	return sg_page_iter_dma_address(&viter->iter);
 }
@@ -336,26 +326,23 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 {
 	viter->i = p_offset - 1;
 	viter->num_pages = vsgt->num_pages;
+	viter->page = &__vmw_piter_non_sg_page;
+	viter->pages = vsgt->pages;
 	switch (vsgt->mode) {
 	case vmw_dma_phys:
 		viter->next = &__vmw_piter_non_sg_next;
 		viter->dma_address = &__vmw_piter_phys_addr;
-		viter->page = &__vmw_piter_non_sg_page;
-		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_alloc_coherent:
 		viter->next = &__vmw_piter_non_sg_next;
 		viter->dma_address = &__vmw_piter_dma_addr;
-		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
-		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
 		viter->next = &__vmw_piter_sg_next;
 		viter->dma_address = &__vmw_piter_sg_addr;
-		viter->page = &__vmw_piter_sg_page;
-		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
 				     vsgt->sgt->orig_nents, p_offset);
 		break;
 	default:
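
The iterator hunks above switch vmw_piter from the CPU-side struct sg_page_iter to struct sg_dma_page_iter for the mapped (vmw_dma_map_*) modes, so DMA addresses are read through the dedicated DMA iterator instead of the page iterator. A minimal stand-alone sketch of the DMA-side iteration API those helpers build on (walk_dma_pages() is a made-up name; it assumes an sg_table that has already been mapped with dma_map_sg()):

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Walk the DMA side of a mapped scatter-gather table, one page at a time. */
static void walk_dma_pages(struct sg_table *sgt)
{
	struct sg_dma_page_iter dma_iter;

	for_each_sg_dma_page(sgt->sgl, &dma_iter, sgt->nents, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		/* addr is the bus/IOVA address of one PAGE_SIZE chunk. */
		(void)addr;
	}
}

In the driver, __vmw_piter_sg_next() additionally advances the plain page index via __vmw_piter_non_sg_next(), so the existing CPU page lookup keeps working alongside the DMA iterator.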