Commit 459d0fa7 authored by Thomas Hellstrom

drm/vmwgfx: Introduce a pin count to allow for recursive pinning v2

v2: Fix dma buffer validation on resource pinning.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent f89c6c32
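
In short, the patch replaces the old boolean pin/unpin toggle on vmwgfx DMA buffers with a per-buffer pin count, so nested code paths can pin the same buffer recursively; only the first pin and the last unpin actually change the TTM placement. Below is a minimal sketch of that pattern, modelled on the vmw_bo_pin_reserved() hunk in the diff that follows (the struct and the make_(un)evictable() helpers here are illustrative stand-ins, not driver API):

    /*
     * Sketch of the recursive pin-count pattern this commit introduces.
     * The pin_count handling mirrors vmw_bo_pin_reserved(); the
     * make_(un)evictable() helpers stand in for the real TTM placement
     * change and are illustrative only.
     */
    #include <assert.h>
    #include <stdbool.h>

    struct pinnable_buffer {
            int pin_count;          /* outstanding pin requests */
            bool evictable;         /* stands in for the TTM placement state */
    };

    static void make_unevictable(struct pinnable_buffer *buf) { buf->evictable = false; }
    static void make_evictable(struct pinnable_buffer *buf)   { buf->evictable = true; }

    /* Only the first pin and the last unpin touch the placement. */
    static void buffer_pin(struct pinnable_buffer *buf, bool pin)
    {
            if (pin) {
                    if (buf->pin_count++ > 0)
                            return;                 /* already pinned, just count it */
                    make_unevictable(buf);
            } else {
                    assert(buf->pin_count > 0);     /* unbalanced unpin is a bug */
                    if (--buf->pin_count > 0)
                            return;                 /* another user still holds a pin */
                    make_evictable(buf);
            }
    }

This is what lets, for example, vmw_resource_pin() and the execbuf query-bo handling pin a buffer that may already be pinned elsewhere without fighting over a single boolean.
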
@@ -32,25 +32,20 @@
 /**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer if true.
+ * @placement:  The placement to pin it.
  * @interruptible:  Use interruptible wait.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
  * Returns
  *  -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
-			    struct vmw_dma_buffer *buf,
-			    struct ttm_placement *placement,
-			    bool interruptible)
+int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				struct ttm_placement *placement,
+				bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 		goto err;
 
 	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
@@ -75,12 +72,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
@@ -90,55 +85,34 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
  * Returns
  *  -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_placement *placement;
 	int ret;
 
 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
-	/**
-	 * Put BO in VRAM if there is space, otherwise as a GMR.
-	 * If there is no space in VRAM and GMR ids are all used up,
-	 * start evicting GMRs to make room. If the DMA buffer can't be
-	 * used as a GMR, this will return -ENOMEM.
-	 */
-	if (pin)
-		placement = &vmw_vram_gmr_ne_placement;
-	else
-		placement = &vmw_vram_gmr_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
-		goto err_unreserve;
+		goto out_unreserve;
 
-	/**
-	 * If that failed, try VRAM again, this time evicting
-	 * previous contents.
-	 */
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 
-err_unreserve:
+out_unreserve:
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 err:
 	ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
+ * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
  * @interruptible:  Use interruptible wait.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-		       struct vmw_dma_buffer *buf,
-		       bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+			   struct vmw_dma_buffer *buf,
+			   bool interruptible)
 {
-	struct ttm_placement *placement;
-
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       placement,
-				       interruptible);
+	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
					   interruptible);
 }
 
 /**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
+ * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
-				bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *buf,
				    bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
 	int ret = 0;
 
-	if (pin)
-		place = vmw_vram_ne_placement.placement[0];
-	else
-		place = vmw_vram_placement.placement[0];
+	place = vmw_vram_placement.placement[0];
 	place.lpfn = bo->num_pages;
 
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	/* Is this buffer already in vram but not at the start of it? */
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0)
@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 
 	ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
-	/* For some reason we didn't up at the start of vram */
+	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 err_unlock:
@@ -240,13 +202,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	return ret;
 }
 
 /**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
 {
-	/*
-	 * We could in theory early out if the buffer is
-	 * unpinned but we need to lock and reserve the buffer
-	 * anyways so we don't gain much by that.
-	 */
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       &vmw_evictable_placement,
-				       interruptible);
-}
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
 
 /**
  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 /**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
 	struct ttm_place pl;
 	struct ttm_placement placement;
+	struct ttm_buffer_object *bo = &vbo->base;
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
+	if (pin) {
+		if (vbo->pin_count++ > 0)
+			return;
+	} else {
+		WARN_ON(vbo->pin_count <= 0);
+		if (--vbo->pin_count > 0)
+			return;
+	}
+
 	pl.fpfn = 0;
 	pl.lpfn = 0;
 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
......
@@ -298,30 +298,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct ttm_buffer_object *bo;
+	struct vmw_dma_buffer *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
 
 	/*
-	 * Create the bo as pinned, so that a tryreserve will
+	 * Create the vbo as pinned, so that a tryreserve will
 	 * immediately succeed. This is because we're the only
 	 * user of the bo currently.
 	 */
-	ret = ttm_bo_create(&dev_priv->bdev,
-			    PAGE_SIZE,
-			    ttm_bo_type_device,
-			    &vmw_sys_ne_placement,
-			    0, false, NULL,
-			    &bo);
+	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+	if (!vbo)
+		return -ENOMEM;
 
+	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+			      &vmw_sys_ne_placement, false,
+			      &vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
 	BUG_ON(ret != 0);
+	vmw_bo_pin_reserved(vbo, true);
 
-	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 	if (likely(ret == 0)) {
 		result = ttm_kmap_obj_virtual(&map, &dummy);
 		result->totalSize = sizeof(*result);
@@ -329,14 +330,14 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 		result->result32 = 0xff;
 		ttm_bo_kunmap(&map);
 	}
-	vmw_bo_pin(bo, false);
-	ttm_bo_unreserve(bo);
+	vmw_bo_pin_reserved(vbo, false);
+	ttm_bo_unreserve(&vbo->base);
 
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		ttm_bo_unref(&bo);
+		vmw_dmabuf_unreference(&vbo);
 	} else
-		dev_priv->dummy_query_bo = bo;
+		dev_priv->dummy_query_bo = vbo;
 
 	return ret;
 }
@@ -434,7 +435,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
-	ttm_bo_unref(&dev_priv->dummy_query_bo);
+	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
......
@@ -85,6 +85,7 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
+	s32 pin_count;
 };
 
 /**
@@ -358,7 +359,7 @@ struct vmw_sw_context{
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
-	struct ttm_buffer_object *cur_query_bo;
+	struct vmw_dma_buffer *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -533,8 +534,8 @@ struct vmw_private {
 	 * are protected by the cmdbuf mutex.
 	 */
 
-	struct ttm_buffer_object *dummy_query_bo;
-	struct ttm_buffer_object *pinned_bo;
+	struct vmw_dma_buffer *dummy_query_bo;
+	struct vmw_dma_buffer *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -700,25 +701,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
  */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
-				   struct vmw_dma_buffer *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
-				     bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo,
-				       bool pin, bool interruptible);
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
+				       struct vmw_dma_buffer *bo,
+				       struct ttm_placement *placement,
+				       bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+					 struct vmw_dma_buffer *buf,
+					 bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					   struct vmw_dma_buffer *bo,
+					   bool interruptible);
 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
......
@@ -308,7 +308,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct ttm_buffer_object *bo,
+				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
 {
@@ -318,7 +318,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	struct drm_hash_item *hash;
 	int ret;
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
@@ -336,7 +336,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 			return -EINVAL;
 		}
 		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) bo;
+		vval_buf->hash.key = (unsigned long) vbo;
 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +345,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		}
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->bo = ttm_bo_reference(&vbo->base);
 		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
@@ -380,10 +380,10 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			return ret;
 
 		if (res->backup) {
-			struct ttm_buffer_object *bo = &res->backup->base;
+			struct vmw_dma_buffer *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo,
+				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
@@ -759,7 +759,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -771,7 +771,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->num_pages > 4)) {
+		if (unlikely(new_query_bo->base.num_pages > 4)) {
 			DRM_ERROR("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -840,12 +840,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
-			vmw_bo_pin(dev_priv->pinned_bo, false);
-			ttm_bo_unref(&dev_priv->pinned_bo);
+			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
-			vmw_bo_pin(sw_context->cur_query_bo, true);
+			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
 
 			/*
 			 * We pin also the dummy_query_bo buffer so that we
@@ -853,14 +853,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			 * dummy queries in context destroy paths.
 			 */
 
-			vmw_bo_pin(dev_priv->dummy_query_bo, true);
-			dev_priv->dummy_query_bo_pinned = true;
+			if (!dev_priv->dummy_query_bo_pinned) {
+				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+						    true);
+				dev_priv->dummy_query_bo_pinned = true;
+			}
 
 			BUG_ON(sw_context->last_query_ctx == NULL);
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				ttm_bo_reference(sw_context->cur_query_bo);
+				vmw_dmabuf_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -889,7 +892,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -900,7 +902,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -913,7 +914,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	reloc->mob_loc = id;
 	reloc->location = NULL;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -951,7 +952,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -962,7 +962,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -974,7 +973,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -1081,7 +1080,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1135,7 +1134,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -2239,16 +2238,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
 {
+	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+						  base);
 	int ret;
 
-	/*
-	 * Don't validate pinned buffers.
-	 */
-
-	if (bo == dev_priv->pinned_bo ||
-	    (bo == dev_priv->dummy_query_bo &&
-	     dev_priv->dummy_query_bo_pinned))
+	if (vbo->pin_count > 0)
 		return 0;
 
 	if (validate_as_mob)
@@ -2767,9 +2761,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
 
 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 }
@@ -2811,11 +2807,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	INIT_LIST_HEAD(&validate_list);
 
-	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
 	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
-	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
 	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
@@ -2836,10 +2832,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 		dev_priv->query_cid_valid = false;
 	}
 
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 
 	if (fence == NULL) {
 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
@@ -2851,7 +2848,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	DRM_INFO("Dummy query bo pin count: %d\n",
+		 dev_priv->dummy_query_bo->pin_count);
 
 out_unlock:
 	return;
@@ -2861,7 +2860,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 }
 
 /**
......
@@ -636,7 +636,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
 	/* Make sure that all overlays are stoped when we take over */
 	vmw_overlay_stop_all(vmw_priv);
 
-	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
+	ret = vmw_dmabuf_pin_in_start_of_vram(vmw_priv, par->vmw_bo, false);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("could not move buffer to start of VRAM\n");
 		goto err_no_buffer;
......
@@ -604,7 +604,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
	 * without writing to the query result structure.
	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery body;
@@ -653,7 +653,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
	 * without writing to the query result structure.
	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery body;
......
@@ -678,7 +678,7 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 	vmw_overlay_pause_all(dev_priv);
 
-	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
+	ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, vfbd->buffer, false);
 
 	vmw_overlay_resume_all(dev_priv);
......
@@ -232,9 +232,9 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
 		return vmw_dmabuf_unpin(dev_priv, buf, inter);
 
 	if (dev_priv->active_display_unit == vmw_du_legacy)
-		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
 
-	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 
 /**
......
@@ -1596,25 +1596,29 @@ int vmw_resource_pin(struct vmw_resource *res)
 		goto out_no_reserve;
 
 	if (res->pin_count == 0) {
-		struct ttm_buffer_object *bo = NULL;
+		struct vmw_dma_buffer *vbo = NULL;
 
 		if (res->backup) {
-			bo = &res->backup->base;
-			ttm_bo_reserve(bo, false, false, false, NULL);
-			ret = ttm_bo_validate(bo, res->func->backup_placement,
-					      false, false);
-			if (ret) {
-				ttm_bo_unreserve(bo);
-				goto out_no_validate;
+			vbo = res->backup;
+			ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+			if (!vbo->pin_count) {
+				ret = ttm_bo_validate
+					(&vbo->base,
+					 res->func->backup_placement,
+					 false, false);
+				if (ret) {
+					ttm_bo_unreserve(&vbo->base);
+					goto out_no_validate;
+				}
 			}
 
 			/* Do we really need to pin the MOB as well? */
-			vmw_bo_pin(bo, true);
+			vmw_bo_pin_reserved(vbo, true);
 		}
 		ret = vmw_resource_validate(res);
-		if (bo)
-			ttm_bo_unreserve(bo);
+		if (vbo)
+			ttm_bo_unreserve(&vbo->base);
 		if (ret)
 			goto out_no_validate;
 	}
@@ -1650,11 +1654,11 @@ void vmw_resource_unpin(struct vmw_resource *res)
 	WARN_ON(res->pin_count == 0);
 	if (--res->pin_count == 0 && res->backup) {
-		struct ttm_buffer_object *bo = &res->backup->base;
+		struct vmw_dma_buffer *vbo = res->backup;
 
-		ttm_bo_reserve(bo, false, false, false, NULL);
-		vmw_bo_pin(bo, false);
-		ttm_bo_unreserve(bo);
+		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		vmw_bo_pin_reserved(vbo, false);
+		ttm_bo_unreserve(&vbo->base);
 	}
 
 	vmw_resource_unreserve(res, NULL, 0UL);
......