Commit 9c079b8c authored by Thomas Hellstrom

drm/vmwgfx: Adapt execbuf to the new validation api

Strip the old execbuf validation functionality and use the new API instead.
Also switch the kms code over to the new API, replacing the now removed
execbuf validation helper it previously called.
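
In outline, a submission now drives the shared validation module roughly as
follows. This is a condensed sketch assembled from the calls introduced
below; error handling is elided, and the locals vbo, res, priv_size, priv,
first_usage and fence are placeholders:

    /* Per-submission validation context, optionally backed by a hash table. */
    DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

    /* Collect buffer objects and resources instead of hand-rolled lists. */
    ret = vmw_validation_add_bo(&val_ctx, vbo, true /* as MOB */, false);
    ret = vmw_validation_add_resource(&val_ctx, res, priv_size,
                                      (void **)&priv, &first_usage);

    /* Reserve and validate everything in one place... */
    ret = vmw_validation_res_reserve(&val_ctx, true);
    ret = vmw_validation_bo_reserve(&val_ctx, true);
    ret = vmw_validation_bo_validate(&val_ctx, true);
    ret = vmw_validation_res_validate(&val_ctx, true);

    /* ...then unreserve, fence and drop all references. */
    vmw_validation_res_unreserve(&val_ctx, false);
    vmw_validation_bo_fence(&val_ctx, fence);
    vmw_validation_unref_lists(&val_ctx);
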
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 84e1bf06
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
......@@ -28,6 +28,7 @@
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
#include "vmwgfx_validation.h"
#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
......@@ -207,26 +208,27 @@ struct vmw_fifo_state {
struct vmw_relocation {
SVGAMobId *mob_loc;
SVGAGuestPtr *location;
uint32_t index;
struct vmw_buffer_object *vbo;
};
/**
* struct vmw_res_cache_entry - resource information cache entry
*
* @handle: User-space handle of a resource.
* @res: Non-ref-counted pointer to the resource.
* @valid_handle: Whether the @handle member is valid.
* @valid: Whether the entry is valid, which also implies that the execbuf
* code holds a reference to the resource, and it's placed on the
* validation list.
* @handle: User-space handle of a resource.
* @res: Non-ref-counted pointer to the resource.
*
* Used to avoid frequent repeated user-space handle lookups of the
* same resource.
*/
struct vmw_res_cache_entry {
bool valid;
uint32_t handle;
struct vmw_resource *res;
struct vmw_resource_val_node *node;
void *private;
unsigned short valid_handle;
unsigned short valid;
};
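
To make the caching contract concrete, here is a minimal, hypothetical
helper showing how a repeated lookup would be short-circuited; the field
names follow the struct above:

    static struct vmw_resource *
    vmw_res_cache_lookup(struct vmw_res_cache_entry *rcache, uint32_t handle)
    {
            /* Only trust the cached handle if it was recorded as valid. */
            if (rcache->valid_handle && rcache->handle == handle)
                    return rcache->res;

            return NULL;    /* Caller falls back to a full handle lookup. */
    }
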
/**
......@@ -291,21 +293,52 @@ enum vmw_display_unit_type {
vmw_du_screen_target
};
struct vmw_validation_context;
struct vmw_ctx_validation_info;
/**
* struct vmw_sw_context - Command submission context
* @res_ht: Pointer hash table used to find validation duplicates
* @kernel: Whether the command buffer originates from kernel code rather
* than from user-space
* @fp: If @kernel is false, points to the file of the client. Otherwise
* NULL
* @relocs: Array of buffer object relocations
* @cur_reloc: Cursor pointing to the current relocation
* @cmd_bounce: Command bounce buffer used for command validation before
* copying to fifo space
* @cmd_bounce_size: Current command bounce buffer size
* @cur_query_bo: Current buffer object used as query result buffer
* @res_relocations: List of resource relocations
* @buf_start: Pointer to start of memory where command validation takes
* place
* @res_cache: Cache of recently looked up resources
* @last_query_ctx: Last context that submitted a query
* @needs_post_query_barrier: Whether a query barrier is needed after
* command submission
* @error_resource: Pointer to hold a reference to the resource causing
* an error
* @staged_bindings: Cached per-context binding tracker
* @staged_bindings_inuse: Whether the cached per-context binding tracker
* is in use
* @staged_cmd_res: List of staged command buffer managed resources in this
* command buffer
* @ctx_list: List of context resources referenced in this command buffer
* @dx_ctx_node: Validation metadata of the current DX context
* @dx_query_mob: The MOB used for DX queries
* @dx_query_ctx: The DX context used for the last DX query
* @man: Pointer to the command buffer managed resource manager
* @ctx: The validation context
*/
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the call made from the kernel */
bool kernel;
struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
struct list_head resource_list;
struct list_head ctx_resource_list; /* For contexts and cotables */
struct vmw_buffer_object *cur_query_bo;
struct list_head res_relocations;
uint32_t *buf_start;
......@@ -316,10 +349,12 @@ struct vmw_sw_context{
struct vmw_ctx_binding_state *staged_bindings;
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
struct vmw_resource_val_node *dx_ctx_node;
struct list_head ctx_list;
struct vmw_ctx_validation_info *dx_ctx_node;
struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
struct vmw_validation_context *ctx;
};
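
Note that @ctx is not allocated together with the structure: as done later
in this patch in vmw_execbuf_process(), the validation context lives on the
submitter's stack and is attached per submission, e.g.:

    DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
    ...
    sw_context->ctx = &val_ctx;
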
struct vmw_legacy_display;
......@@ -864,10 +899,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
uint32_t fence_handle,
int32_t out_fence_fd,
struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
......@@ -69,35 +69,18 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3;
};
/**
* struct vmw_resource_val_node - Validation info for resources
*
* @head: List head for the software context's resource list.
* @hash: Hash entry for quick resource to val_node lookup.
* @res: Ref-counted pointer to the resource.
* @switch_backup: Boolean whether to switch backup buffer on unreserve.
* @new_backup: Refcounted pointer to the new backup buffer.
* @staged_bindings: If @res is a context, tracks bindings set up during
* the command batch. Otherwise NULL.
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
* @switching_backup: The command stream provides a new backup buffer for a
* resource.
* @no_buffer_needed: This means @switching_backup is true on first buffer
* reference. So resource reservation does not need to allocate a backup
* buffer for the resource.
/*
* struct vmw_ctx_validation_info - Extra validation metadata for contexts
* @head: List head of context list
* @ctx: The context resource
* @cur: The context's persistent binding state
* @staged: The binding state changes of this command buffer
*/
struct vmw_resource_val_node {
struct vmw_ctx_validation_info {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_buffer_object *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
u32 first_usage : 1;
u32 switching_backup : 1;
u32 no_buffer_needed : 1;
struct vmw_resource *ctx;
struct vmw_ctx_binding_state *cur;
struct vmw_ctx_binding_state *staged;
};
/**
......@@ -127,10 +110,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
......@@ -145,65 +124,55 @@ static size_t vmw_ptr_diff(void *a, void *b)
}
/**
* vmw_resources_unreserve - unreserve resources previously reserved for
* command submission.
*
* @sw_context: pointer to the software context
* @backoff: Whether command submission failed.
* vmw_execbuf_bindings_commit - Commit modified binding state
* @sw_context: The command submission context
* @backoff: Whether this is part of the error path and binding state
* changes should be ignored
*/
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
bool backoff)
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
bool backoff)
{
struct vmw_resource_val_node *val;
struct list_head *list = &sw_context->resource_list;
struct vmw_ctx_validation_info *entry, *next;
if (sw_context->dx_query_mob && !backoff)
vmw_context_bind_dx_query(sw_context->dx_query_ctx,
sw_context->dx_query_mob);
list_for_each_entry_safe(entry, next, &sw_context->ctx_list, head) {
list_del(&entry->head);
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
bool switch_backup =
(backoff) ? false : val->switching_backup;
/*
* Transfer staged context bindings to the
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
vmw_binding_state_commit
(vmw_context_binding_state(val->res),
val->staged_bindings);
}
if (val->staged_bindings != sw_context->staged_bindings)
vmw_binding_state_free(val->staged_bindings);
else
sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
vmw_resource_unreserve(res, switch_backup, val->new_backup,
val->new_backup_offset);
vmw_bo_unreference(&val->new_backup);
if (!backoff)
vmw_binding_state_commit(entry->cur, entry->staged);
if (entry->staged != sw_context->staged_bindings)
vmw_binding_state_free(entry->staged);
else
sw_context->staged_bindings_inuse = false;
}
}
/**
* vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
* @sw_context: The command submission context
*/
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
if (sw_context->dx_query_mob)
vmw_context_bind_dx_query(sw_context->dx_query_ctx,
sw_context->dx_query_mob);
}
/**
* vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
* added to the validate list.
*
* @dev_priv: Pointer to the device private
* @sw_context: The validation context:
* @node: The validation node holding this context.
* @sw_context: The command submission context
* @node: The validation node holding the context resource metadata
*/
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource_val_node *node)
struct vmw_resource *res,
struct vmw_ctx_validation_info *node)
{
int ret;
ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
......@@ -220,19 +189,23 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
}
if (sw_context->staged_bindings_inuse) {
node->staged_bindings = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged_bindings)) {
node->staged = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged)) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
ret = PTR_ERR(node->staged_bindings);
node->staged_bindings = NULL;
ret = PTR_ERR(node->staged);
node->staged = NULL;
goto out_err;
}
} else {
node->staged_bindings = sw_context->staged_bindings;
node->staged = sw_context->staged_bindings;
sw_context->staged_bindings_inuse = true;
}
node->ctx = res;
node->cur = vmw_context_binding_state(res);
list_add_tail(&node->head, &sw_context->ctx_list);
return 0;
out_err:
return ret;
......@@ -248,61 +221,42 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
* struct vmw_resource_val_node, if non-NULL on entry.
*/
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
enum vmw_res_type res_type = vmw_res_type(res);
struct vmw_res_cache_entry *rcache;
struct vmw_ctx_validation_info *ctx_info;
bool first_usage;
size_t priv_size;
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
&hash) == 0)) {
node = container_of(hash, struct vmw_resource_val_node, hash);
node->first_usage = false;
if (unlikely(p_node != NULL))
*p_node = node;
return 0;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
}
/*
* If the resource is a context, set up structures to track
* context bindings.
*/
priv_size = (res_type == vmw_res_dx_context ||
(res_type == vmw_res_context && dev_priv->has_mob)) ?
sizeof(*ctx_info) : 0;
node->hash.key = (unsigned long) res;
ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
kfree(node);
ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
(void **)&ctx_info, &first_usage);
if (ret)
return ret;
}
node->res = vmw_resource_reference(res);
node->first_usage = true;
if (unlikely(p_node != NULL))
*p_node = node;
if (!dev_priv->has_mob) {
list_add_tail(&node->head, &sw_context->resource_list);
return 0;
if (priv_size && first_usage) {
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
ctx_info);
if (ret)
return ret;
}
switch (vmw_res_type(res)) {
case vmw_res_context:
case vmw_res_dx_context:
list_add(&node->head, &sw_context->ctx_resource_list);
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
break;
case vmw_res_cotable:
list_add_tail(&node->head, &sw_context->ctx_resource_list);
break;
default:
list_add_tail(&node->head, &sw_context->resource_list);
break;
}
/* Cache info about the last added resource */
rcache = &sw_context->res_cache[res_type];
rcache->res = res;
rcache->private = ctx_info;
rcache->valid = 1;
rcache->valid_handle = 0;
return ret;
}
......@@ -325,11 +279,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
* First add the resource the view is pointing to, otherwise
* it may be swapped out when the view is validated.
*/
ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
ret = vmw_resource_val_add(sw_context, vmw_view_srf(view));
if (ret)
return ret;
return vmw_resource_val_add(sw_context, view, NULL);
return vmw_resource_val_add(sw_context, view);
}
/**
......@@ -347,7 +301,7 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
enum vmw_view_type view_type, u32 id)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *view;
int ret;
......@@ -394,7 +348,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
ret = vmw_resource_val_add(sw_context, res, NULL);
ret = vmw_resource_val_add(sw_context, res);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
......@@ -415,8 +369,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
ret = vmw_resource_val_add(sw_context, entry->res,
NULL);
ret = vmw_resource_val_add(sw_context, entry->res);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
......@@ -427,9 +380,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
ret = vmw_bo_to_validate_list(sw_context,
dx_query_mob,
true, NULL);
ret = vmw_validation_add_bo(sw_context->ctx,
dx_query_mob, true, false);
}
mutex_unlock(&dev_priv->binding_mutex);
......@@ -531,68 +483,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
return 0;
}
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
* @validate_as_mob: Validate this buffer as a MOB.
* @p_val_node: If non-NULL Will be updated with the validate node number
* on return.
*
* Returns -EINVAL if the limit on the number of buffer objects per command
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_buffer_object *vbo,
bool validate_as_mob,
uint32_t *p_val_node)
{
uint32_t val_node;
struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
struct drm_hash_item *hash;
int ret;
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
DRM_ERROR("Inconsistent buffer usage.\n");
return -EINVAL;
}
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
val_node = sw_context->cur_val_buf;
if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
DRM_ERROR("Max number of DMA buffers per submission "
"exceeded.\n");
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
vval_buf->hash.key = (unsigned long) vbo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
"entry.\n");
return ret;
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
val_buf->bo = ttm_bo_reference(&vbo->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
if (p_val_node)
*p_val_node = val_node;
return 0;
}
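
The roughly sixty lines above collapse into single calls to the new
validation API throughout this patch; schematically, with vbo and
validate_as_mob standing in for the per-call-site arguments:

    /* Before: driver-local validate-list bookkeeping. */
    ret = vmw_bo_to_validate_list(sw_context, vbo, validate_as_mob, NULL);

    /* After: one call into the shared validation module; the trailing
     * false matches every converted call site in this patch. */
    ret = vmw_validation_add_bo(sw_context->ctx, vbo, validate_as_mob, false);
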
/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
......@@ -605,27 +495,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret = 0;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
int ret;
if (unlikely(ret != 0))
return ret;
}
}
ret = vmw_validation_res_reserve(sw_context->ctx, true);
if (ret)
return ret;
if (sw_context->dx_query_mob) {
struct vmw_buffer_object *expected_dx_query_mob;
......@@ -641,68 +515,23 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
}
/**
* vmw_resources_validate - Validate all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Before this function is called, all resource backup buffers must have
* been validated.
*/
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
struct vmw_buffer_object *backup = res->backup;
ret = vmw_resource_validate(res, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
/* Check if the resource switched backup buffer */
if (backup && res->backup && (backup != res->backup)) {
struct vmw_buffer_object *vbo = res->backup;
ret = vmw_bo_to_validate_list
(sw_context, vbo,
vmw_resource_needs_backup(res), NULL);
if (ret) {
ttm_bo_unreserve(&vbo->base);
return ret;
}
}
}
return 0;
}
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
* used for this resource is returned here.
*
* Return: Zero on success, negative error code on error
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
struct vmw_resource *res)
{
int ret;
struct vmw_resource_val_node *node;
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
vmw_ptr_diff(sw_context->buf_start,
......@@ -711,13 +540,10 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
ret = vmw_resource_val_add(sw_context, res, &node);
ret = vmw_resource_val_add(sw_context, res);
if (unlikely(ret != 0))
return ret;
if (p_val)
*p_val = node;
return 0;
}
......@@ -741,17 +567,17 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
struct vmw_resource_val_node **p_val)
struct vmw_resource **p_res)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
struct vmw_resource *res;
struct vmw_resource_val_node *node;
int ret;
if (p_res)
*p_res = NULL;
if (*id_loc == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
DRM_ERROR("Illegal context invalid id.\n");
return -EINVAL;
......@@ -764,12 +590,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
* resource
*/
if (likely(rcache->valid && *id_loc == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
if (p_val)
*p_val = rcache->node;
if (p_res)
*p_res = res;
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
......@@ -789,18 +614,19 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return ret;
}
rcache->valid = true;
rcache->res = res;
rcache->handle = *id_loc;
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
res, &node);
res);
if (unlikely(ret != 0))
goto out_no_reloc;
rcache->node = node;
if (p_val)
*p_val = node;
if (p_res)
*p_res = res;
if (rcache->valid && rcache->res == res) {
rcache->valid_handle = true;
rcache->handle = *id_loc;
}
vmw_resource_unreference(&res);
return 0;
......@@ -861,22 +687,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
struct vmw_ctx_validation_info *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
if (unlikely(!val->staged_bindings))
break;
ret = vmw_binding_rebind_all
(vmw_context_binding_state(val->res));
list_for_each_entry(val, &sw_context->ctx_list, head) {
ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
ret = vmw_rebind_all_dx_query(val->res);
ret = vmw_rebind_all_dx_query(val->ctx);
if (ret != 0)
return ret;
}
......@@ -903,7 +725,7 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_cmdbuf_res_manager *man;
u32 i;
int ret;
......@@ -933,12 +755,12 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
return ret;
}
}
binding.bi.ctx = ctx_node->res;
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = view;
binding.bi.bt = binding_type;
binding.shader_slot = shader_slot;
binding.slot = first_slot + i;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
vmw_binding_add(ctx_node->staged, &binding.bi,
shader_slot, binding.slot);
if (view)
vmw_resource_unreference(&view);
......@@ -971,6 +793,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
user_context_converter, &cmd->cid, NULL);
}
/**
* vmw_execbuf_info_from_res - Get the private validation metadata for a
* recently validated resource
* @sw_context: Pointer to the command submission context
* @res: The resource
*
* The resource pointed to by @res needs to be present in the command submission
* context's resource cache and hence the last resource of that type to be
* processed by the validation code.
*
* Return: a pointer to the private metadata of the resource, or NULL
* if it wasn't found
*/
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
struct vmw_resource *res)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[vmw_res_type(res)];
if (rcache->valid && rcache->res == res)
return rcache->private;
WARN_ON_ONCE(true);
return NULL;
}
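
A sketched call sequence mirroring the binding-path callers below; the
lookup must directly follow the vmw_cmd_res_check() call that processed the
resource, since only then is it still in the cache:

    struct vmw_resource *ctx;
    struct vmw_ctx_validation_info *node;
    int ret;

    ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                            user_context_converter, &cmd->body.cid, &ctx);
    if (ret)
            return ret;

    node = vmw_execbuf_info_from_res(sw_context, ctx);
    if (!node)
            return -EINVAL;

    /* Stage the binding against the context's tracker (see callers below). */
    vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
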
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
......@@ -979,8 +829,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
struct vmw_resource_val_node *ctx_node;
struct vmw_resource_val_node *res_node;
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
......@@ -993,25 +843,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
&ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.target.sid, &res_node);
&cmd->body.target.sid, &res);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_view binding;
struct vmw_ctx_validation_info *node;
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
node = vmw_execbuf_info_from_res(sw_context, ctx);
if (!node)
return -EINVAL;
binding.bi.ctx = ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_rt;
binding.slot = cmd->body.type;
vmw_binding_add(ctx_node->staged_bindings,
&binding.bi, 0, binding.slot);
vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
}
return 0;
......@@ -1171,17 +1025,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
dev_priv->has_mob, NULL);
ret = vmw_validation_add_bo(sw_context->ctx,
sw_context->cur_query_bo,
dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
dev_priv->has_mob, NULL);
ret = vmw_validation_add_bo(sw_context->ctx,
dev_priv->dummy_query_bo,
dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
......@@ -1306,8 +1160,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->mob_loc = id;
reloc->location = NULL;
reloc->vbo = vmw_bo;
ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
if (unlikely(ret != 0))
goto out_no_reloc;
......@@ -1365,8 +1220,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
reloc->vbo = vmw_bo;
ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
if (unlikely(ret != 0))
goto out_no_reloc;
......@@ -1400,7 +1256,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
} *cmd;
int ret;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res;
......@@ -1415,7 +1271,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
vmw_resource_unreference(&cotable_res);
......@@ -1462,7 +1318,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return ret;
sw_context->dx_query_mob = vmw_bo;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
vmw_bo_unreference(&vmw_bo);
......@@ -1837,8 +1693,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
struct vmw_resource_val_node *ctx_node;
struct vmw_resource_val_node *res_node;
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_tex_state_cmd,
......@@ -1846,7 +1702,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->state.cid,
&ctx_node);
&ctx);
if (unlikely(ret != 0))
return ret;
......@@ -1862,19 +1718,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cur_state->value, &res_node);
&cur_state->value, &res);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_tex binding;
struct vmw_ctx_validation_info *node;
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
node = vmw_execbuf_info_from_res(sw_context, ctx);
if (!node)
return -EINVAL;
binding.bi.ctx = ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_tex;
binding.texture_stage = cur_state->stage;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
0, binding.texture_stage);
vmw_binding_add(node->staged, &binding.bi, 0,
binding.texture_stage);
}
}
......@@ -1922,24 +1783,25 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource_val_node *val_node,
struct vmw_resource *res,
uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_buffer_object *dma_buf;
struct vmw_buffer_object *vbo;
void *info;
int ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
info = vmw_execbuf_info_from_res(sw_context, res);
if (!info)
return -EINVAL;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
if (ret)
return ret;
val_node->switching_backup = true;
if (val_node->first_usage)
val_node->no_buffer_needed = true;
vmw_bo_unreference(&val_node->new_backup);
val_node->new_backup = dma_buf;
val_node->new_backup_offset = backup_offset;
vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
backup_offset);
vmw_bo_unreference(&vbo);
return 0;
}
......@@ -1970,15 +1832,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_resource_val_node *val_node;
struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
converter, res_id, &val_node);
converter, res_id, &res);
if (ret)
return ret;
return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
buf_id, backup_offset);
}
......@@ -2170,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd;
int ret;
size_t size;
struct vmw_resource_val_node *val;
struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&val);
&ctx);
if (unlikely(ret != 0))
return ret;
......@@ -2186,7 +2048,7 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv,
vmw_context_res_man(val->res),
vmw_context_res_man(ctx),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
&sw_context->staged_cmd_res);
......@@ -2217,21 +2079,21 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
struct vmw_resource_val_node *val;
struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&val);
&ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
ret = vmw_shader_remove(vmw_context_res_man(val->res),
ret = vmw_shader_remove(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type,
&sw_context->staged_cmd_res);
......@@ -2261,9 +2123,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *res = NULL;
struct vmw_resource *ctx, *res = NULL;
struct vmw_ctx_validation_info *ctx_info;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
......@@ -2277,7 +2139,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
&ctx);
if (unlikely(ret != 0))
return ret;
......@@ -2285,34 +2147,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type);
if (!IS_ERR(res)) {
struct vmw_resource *tmp_res = res;
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
&cmd->body.shid, res);
vmw_resource_unreference(&tmp_res);
if (unlikely(ret != 0))
return ret;
}
}
if (!res_node) {
if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader,
user_shader_converter,
&cmd->body.shid, &res_node);
&cmd->body.shid, &res);
if (unlikely(ret != 0))
return ret;
}
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
if (!ctx_info)
return -EINVAL;
binding.bi.ctx = ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
vmw_binding_add(ctx_info->staged, &binding.bi,
binding.shader_slot, 0);
return 0;
}
......@@ -2393,8 +2260,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
struct vmw_resource_val_node *res_node = NULL;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_cb binding;
int ret;
......@@ -2406,12 +2273,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.sid, &res_node);
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
binding.bi.ctx = ctx_node->res;
binding.bi.res = res_node ? res_node->res : NULL;
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
binding.offset = cmd->body.offsetInBytes;
......@@ -2426,7 +2293,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
return -EINVAL;
}
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, binding.slot);
return 0;
......@@ -2482,7 +2349,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdDXSetShader body;
} *cmd;
struct vmw_resource *res = NULL;
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
......@@ -2506,17 +2373,17 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
return PTR_ERR(res);
}
ret = vmw_resource_val_add(sw_context, res, NULL);
ret = vmw_resource_val_add(sw_context, res);
if (ret)
goto out_unref;
}
binding.bi.ctx = ctx_node->res;
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, 0);
out_unref:
if (res)
......@@ -2537,9 +2404,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_vb binding;
struct vmw_resource_val_node *res_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body;
......@@ -2564,18 +2431,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->buf[i].sid, &res_node);
&cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
return ret;
binding.bi.ctx = ctx_node->res;
binding.bi.ctx = ctx_node->ctx;
binding.bi.bt = vmw_ctx_binding_vb;
binding.bi.res = ((res_node) ? res_node->res : NULL);
binding.bi.res = res;
binding.offset = cmd->buf[i].offset;
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
......@@ -2594,9 +2461,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_ib binding;
struct vmw_resource_val_node *res_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body;
......@@ -2611,17 +2478,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.sid, &res_node);
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
binding.bi.ctx = ctx_node->res;
binding.bi.res = ((res_node) ? res_node->res : NULL);
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_ib;
binding.offset = cmd->body.offset;
binding.format = cmd->body.format;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
return 0;
}
......@@ -2708,8 +2575,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource_val_node *srf_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
......@@ -2734,19 +2601,19 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, &srf_node);
&cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
return vmw_view_add(sw_context->man,
ctx_node->res,
srf_node->res,
ctx_node->ctx,
srf,
view_type,
cmd->defined_id,
header,
......@@ -2766,9 +2633,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_so binding;
struct vmw_resource_val_node *res_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body;
......@@ -2793,18 +2660,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->targets[i].sid, &res_node);
&cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
return ret;
binding.bi.ctx = ctx_node->res;
binding.bi.res = ((res_node) ? res_node->res : NULL);
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_so,
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
......@@ -2815,7 +2682,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
......@@ -2834,7 +2701,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
}
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
......@@ -2882,7 +2749,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
......@@ -2907,7 +2774,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
......@@ -2953,7 +2820,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
......@@ -2966,13 +2833,13 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL;
}
res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
vmw_resource_unreference(&res);
if (ret)
return ret;
return vmw_dx_shader_add(sw_context->man, ctx_node->res,
return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
cmd->body.shaderId, cmd->body.type,
&sw_context->staged_cmd_res);
}
......@@ -2989,7 +2856,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyShader body;
......@@ -3021,8 +2888,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_resource_val_node *ctx_node;
struct vmw_resource_val_node *res_node;
struct vmw_resource *ctx;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
......@@ -3033,32 +2899,32 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter,
&cmd->body.cid, &ctx_node);
&cmd->body.cid, &ctx);
if (ret)
return ret;
} else {
ctx_node = sw_context->dx_ctx_node;
if (!ctx_node) {
if (!sw_context->dx_ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
ctx = sw_context->dx_ctx_node->ctx;
}
res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, 0);
if (IS_ERR(res)) {
DRM_ERROR("Could not find shader to bind.\n");
return PTR_ERR(res);
}
ret = vmw_resource_val_add(sw_context, res, &res_node);
ret = vmw_resource_val_add(sw_context, res);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
goto out_unref;
}
ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
&cmd->body.mobid,
cmd->body.offsetInBytes);
out_unref:
......@@ -3645,13 +3511,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
uint32_t i;
struct vmw_relocation *reloc;
struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
validate = &sw_context->val_bufs[reloc->index].base;
bo = validate->bo;
bo = &reloc->vbo->base;
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
......@@ -3670,110 +3534,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
vmw_free_relocations(sw_context);
}
/**
* vmw_resource_list_unreference - Free up a resource list and unreference
* all resources referenced by it.
*
* @list: The resource list.
*/
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
struct list_head *list)
{
struct vmw_resource_val_node *val, *val_next;
/*
* Drop references to resources held during command submission.
*/
list_for_each_entry_safe(val, val_next, list, head) {
list_del_init(&val->head);
vmw_resource_unreference(&val->res);
if (val->staged_bindings) {
if (val->staged_bindings != sw_context->staged_bindings)
vmw_binding_state_free(val->staged_bindings);
else
sw_context->staged_bindings_inuse = false;
val->staged_bindings = NULL;
}
kfree(val);
}
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry, *next;
struct vmw_resource_val_node *val;
/*
* Drop references to DMA buffers held during command submission.
*/
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
base.head) {
list_del(&entry->base.head);
ttm_bo_unref(&entry->base.bo);
(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
list_for_each_entry(val, &sw_context->resource_list, head)
(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob)
{
struct vmw_buffer_object *vbo =
container_of(bo, struct vmw_buffer_object, base);
struct ttm_operation_ctx ctx = { interruptible, true };
int ret;
if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* If there is no space in VRAM and GMR ids are all used up,
* start evicting GMRs to make room. If the DMA buffer can't be
* used as a GMR, this will return -ENOMEM.
*/
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
/**
* If that failed, try VRAM again, this time evicting
* previous contents.
*/
ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
return ret;
}
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
struct vmw_validate_buffer *entry;
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
true,
entry->validate_as_mob);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size)
{
......@@ -3946,7 +3706,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
if (sw_context->dx_ctx_node)
cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
sw_context->dx_ctx_node->res->id);
sw_context->dx_ctx_node->ctx->id);
else
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) {
......@@ -3980,7 +3740,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
u32 command_size,
struct vmw_sw_context *sw_context)
{
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
id, false, header);
......@@ -4057,7 +3817,6 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t handle)
{
struct vmw_resource_val_node *ctx_node;
struct vmw_resource *res;
int ret;
......@@ -4073,11 +3832,11 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return ret;
}
ret = vmw_resource_val_add(sw_context, res, &ctx_node);
ret = vmw_resource_val_add(sw_context, res);
if (unlikely(ret != 0))
goto out_err;
sw_context->dx_ctx_node = ctx_node;
sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
sw_context->man = vmw_context_res_man(res);
out_err:
vmw_resource_unreference(&res);
......@@ -4098,14 +3857,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
struct list_head resource_list;
struct vmw_cmdbuf_header *header;
struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
......@@ -4158,9 +3915,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
INIT_LIST_HEAD(&sw_context->resource_list);
INIT_LIST_HEAD(&sw_context->ctx_resource_list);
INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
......@@ -4168,7 +3923,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
......@@ -4180,24 +3934,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list);
sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
if (unlikely(ret != 0)) {
list_splice_init(&sw_context->ctx_resource_list,
&sw_context->resource_list);
if (unlikely(ret != 0))
goto out_err_nores;
}
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
/*
* Merge the resource lists before checking the return status
* from vmw_cmd_check_all so that all the open hashtabs will
* be handled properly even if vmw_cmd_check_all fails.
*/
list_splice_init(&sw_context->ctx_resource_list,
&sw_context->resource_list);
if (unlikely(ret != 0))
goto out_err_nores;
......@@ -4205,18 +3948,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
ret = vmw_validation_bo_reserve(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
ret = vmw_validation_bo_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
ret = vmw_resources_validate(sw_context);
ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
......@@ -4255,17 +3998,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resources_unreserve(sw_context, false);
vmw_execbuf_bindings_commit(sw_context, false);
vmw_bind_dx_query_mob(sw_context);
vmw_validation_res_unreserve(&val_ctx, false);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
vmw_validation_bo_fence(sw_context->ctx, fence);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
vmw_clear_validations(sw_context);
/*
* If anything fails here, give up trying to export the fence
* and do a sync since the user mode will not be able to sync
......@@ -4300,7 +4042,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
......@@ -4308,34 +4049,35 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(sw_context, &resource_list);
vmw_validation_unref_lists(&val_ctx);
return 0;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
vmw_resources_unreserve(sw_context, true);
vmw_execbuf_bindings_commit(sw_context, true);
vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
vmw_validation_drop_ht(&val_ctx);
WARN_ON(!list_empty(&sw_context->ctx_list));
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
vmw_resource_list_unreference(sw_context, &resource_list);
vmw_validation_unref_lists(&val_ctx);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
out_free_header:
......@@ -4398,38 +4140,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
{
int ret = 0;
struct list_head validate_list;
struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL;
struct ww_acquire_ctx ticket;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
INIT_LIST_HEAD(&validate_list);
pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
pinned_val.shared = false;
list_add_tail(&pinned_val.head, &validate_list);
ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
false);
if (ret)
goto out_no_reserve;
query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
false);
if (ret)
goto out_no_reserve;
ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
false, NULL);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
ret = vmw_validation_bo_reserve(&val_ctx, false);
if (ret)
goto out_no_reserve;
}
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
if (ret)
goto out_no_emit;
}
dev_priv->query_cid_valid = false;
}
......@@ -4443,22 +4178,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
NULL);
fence = lfence;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
vmw_validation_bo_fence(&val_ctx, fence);
if (lfence != NULL)
vmw_fence_obj_unreference(&lfence);
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
out_no_emit:
ttm_eu_backoff_reservation(&ticket, &validate_list);
vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_validation_unref_lists(&val_ctx);
vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
}
/**
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
......@@ -2586,8 +2586,8 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
if (for_cpu_blit)
ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
else
ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
validate_as_mob);
ret = vmw_validation_bo_validate_single(bo, interruptible,
validate_as_mob);
if (ret)
ttm_bo_unreserve(bo);