Commit 680360a4 authored by Deepak Rawat

drm/vmwgfx: Fix formatting and spaces in vmwgfx_execbuf.c

No functional change; just fixing formatting and spaces.

v2: Rebase.
Signed-off-by: Deepak Rawat <drawat@vmware.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
parent 4062dd3e
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
__type body; \ __type body; \
} __var } __var
/* /**
* struct vmw_relocation - Buffer object relocation * struct vmw_relocation - Buffer object relocation
* *
* @head: List head for the command submission context's relocation list * @head: List head for the command submission context's relocation list
...@@ -78,9 +78,8 @@ struct vmw_relocation { ...@@ -78,9 +78,8 @@ struct vmw_relocation {
* command stream is replaced with the actual id after validation. * command stream is replaced with the actual id after validation.
* @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
* with a NOP. * with a NOP.
* @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
* after validation is -1, the command is replaced with a NOP. Otherwise no * validation is -1, the command is replaced with a NOP. Otherwise no action.
* action.
*/ */
enum vmw_resource_relocation_type { enum vmw_resource_relocation_type {
vmw_res_rel_normal, vmw_res_rel_normal,
...@@ -94,8 +93,8 @@ enum vmw_resource_relocation_type { ...@@ -94,8 +93,8 @@ enum vmw_resource_relocation_type {
* *
* @head: List head for the software context's relocation list. * @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource. * @res: Non-ref-counted pointer to the resource.
* @offset: Offset of single byte entries into the command buffer where the * @offset: Offset of single byte entries into the command buffer where the id
* id that needs fixup is located. * that needs fixup is located.
* @rel_type: Type of relocation. * @rel_type: Type of relocation.
*/ */
struct vmw_resource_relocation { struct vmw_resource_relocation {
...@@ -105,8 +104,9 @@ struct vmw_resource_relocation { ...@@ -105,8 +104,9 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3; enum vmw_resource_relocation_type rel_type:3;
}; };
/* /**
* struct vmw_ctx_validation_info - Extra validation metadata for contexts * struct vmw_ctx_validation_info - Extra validation metadata for contexts
*
* @head: List head of context list * @head: List head of context list
* @ctx: The context resource * @ctx: The context resource
* @cur: The context's persistent binding state * @cur: The context's persistent binding state
...@@ -161,9 +161,10 @@ static size_t vmw_ptr_diff(void *a, void *b) ...@@ -161,9 +161,10 @@ static size_t vmw_ptr_diff(void *a, void *b)
/** /**
* vmw_execbuf_bindings_commit - Commit modified binding state * vmw_execbuf_bindings_commit - Commit modified binding state
*
* @sw_context: The command submission context * @sw_context: The command submission context
* @backoff: Whether this is part of the error path and binding state * @backoff: Whether this is part of the error path and binding state changes
* changes should be ignored * should be ignored
*/ */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
bool backoff) bool backoff)
...@@ -173,6 +174,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, ...@@ -173,6 +174,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
list_for_each_entry(entry, &sw_context->ctx_list, head) { list_for_each_entry(entry, &sw_context->ctx_list, head) {
if (!backoff) if (!backoff)
vmw_binding_state_commit(entry->cur, entry->staged); vmw_binding_state_commit(entry->cur, entry->staged);
if (entry->staged != sw_context->staged_bindings) if (entry->staged != sw_context->staged_bindings)
vmw_binding_state_free(entry->staged); vmw_binding_state_free(entry->staged);
else else
...@@ -185,6 +187,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, ...@@ -185,6 +187,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
/** /**
* vmw_bind_dx_query_mob - Bind the DX query MOB if referenced * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
*
* @sw_context: The command submission context * @sw_context: The command submission context
*/ */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
...@@ -195,8 +198,8 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) ...@@ -195,8 +198,8 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
} }
/** /**
* vmw_cmd_ctx_first_setup - Perform the setup needed when a context is * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
* added to the validate list. * the validate list.
* *
* @dev_priv: Pointer to the device private: * @dev_priv: Pointer to the device private:
* @sw_context: The command submission context * @sw_context: The command submission context
...@@ -214,8 +217,7 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, ...@@ -214,8 +217,7 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
goto out_err; goto out_err;
if (!sw_context->staged_bindings) { if (!sw_context->staged_bindings) {
sw_context->staged_bindings = sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
vmw_binding_state_alloc(dev_priv);
if (IS_ERR(sw_context->staged_bindings)) { if (IS_ERR(sw_context->staged_bindings)) {
ret = PTR_ERR(sw_context->staged_bindings); ret = PTR_ERR(sw_context->staged_bindings);
sw_context->staged_bindings = NULL; sw_context->staged_bindings = NULL;
...@@ -240,19 +242,20 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, ...@@ -240,19 +242,20 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
list_add_tail(&node->head, &sw_context->ctx_list); list_add_tail(&node->head, &sw_context->ctx_list);
return 0; return 0;
out_err: out_err:
return ret; return ret;
} }
/** /**
* vmw_execbuf_res_size - calculate extra size fore the resource validation * vmw_execbuf_res_size - calculate extra size fore the resource validation node
* node *
* @dev_priv: Pointer to the device private struct. * @dev_priv: Pointer to the device private struct.
* @res_type: The resource type. * @res_type: The resource type.
* *
* Guest-backed contexts and DX contexts require extra size to store * Guest-backed contexts and DX contexts require extra size to store execbuf
* execbuf private information in the validation node. Typically the * private information in the validation node. Typically the binding manager
* binding manager associated data structures. * associated data structures.
* *
* Returns: The extra size requirement based on resource type. * Returns: The extra size requirement based on resource type.
*/ */
...@@ -269,8 +272,8 @@ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv, ...@@ -269,8 +272,8 @@ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
* *
* @rcache: Pointer to the entry to update. * @rcache: Pointer to the entry to update.
* @res: Pointer to the resource. * @res: Pointer to the resource.
* @private: Pointer to the execbuf-private space in the resource * @private: Pointer to the execbuf-private space in the resource validation
* validation node. * node.
*/ */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
struct vmw_resource *res, struct vmw_resource *res,
...@@ -283,15 +286,15 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, ...@@ -283,15 +286,15 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
} }
/** /**
* vmw_execbuf_res_noref_val_add - Add a resource described by an * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
* unreferenced rcu-protected pointer to the validation list. * rcu-protected pointer to the validation list.
*
* @sw_context: Pointer to the software context. * @sw_context: Pointer to the software context.
* @res: Unreferenced rcu-protected pointer to the resource. * @res: Unreferenced rcu-protected pointer to the resource.
* @dirty: Whether to change dirty status. * @dirty: Whether to change dirty status.
* *
* Returns: 0 on success. Negative error code on failure. Typical error * Returns: 0 on success. Negative error code on failure. Typical error codes
* codes are %-EINVAL on inconsistency and %-ESRCH if the resource was * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
* doomed.
*/ */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context, static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res, struct vmw_resource *res,
...@@ -338,6 +341,7 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context, ...@@ -338,6 +341,7 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
/** /**
* vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
* validation list if it's not already on it * validation list if it's not already on it
*
* @sw_context: Pointer to the software context. * @sw_context: Pointer to the software context.
* @res: Pointer to the resource. * @res: Pointer to the resource.
* @dirty: Whether to change dirty status. * @dirty: Whether to change dirty status.
...@@ -372,8 +376,8 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context, ...@@ -372,8 +376,8 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
} }
/** /**
* vmw_view_res_val_add - Add a view and the surface it's pointing to * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
* to the validation list * validation list
* *
* @sw_context: The software context holding the validation list. * @sw_context: The software context holding the validation list.
* @view: Pointer to the view resource. * @view: Pointer to the view resource.
...@@ -386,8 +390,8 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, ...@@ -386,8 +390,8 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
int ret; int ret;
/* /*
* First add the resource the view is pointing to, otherwise * First add the resource the view is pointing to, otherwise it may be
* it may be swapped out when the view is validated. * swapped out when the view is validated.
*/ */
ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view), ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
vmw_view_dirtying(view)); vmw_view_dirtying(view));
...@@ -399,16 +403,16 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, ...@@ -399,16 +403,16 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
} }
/** /**
* vmw_view_id_val_add - Look up a view and add it and the surface it's * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
* pointing to to the validation list. * to to the validation list.
* *
* @sw_context: The software context holding the validation list. * @sw_context: The software context holding the validation list.
* @view_type: The view type to look up. * @view_type: The view type to look up.
* @id: view id of the view. * @id: view id of the view.
* *
* The view is represented by a view id and the DX context it's created on, * The view is represented by a view id and the DX context it's created on, or
* or scheduled for creation on. If there is no DX context set, the function * scheduled for creation on. If there is no DX context set, the function will
* will return an -EINVAL error pointer. * return an -EINVAL error pointer.
* *
* Returns: Unreferenced pointer to the resource on success, negative error * Returns: Unreferenced pointer to the resource on success, negative error
* pointer on failure. * pointer on failure.
...@@ -443,8 +447,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context, ...@@ -443,8 +447,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
* @sw_context: Pointer to a software context used for this command submission * @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource * @ctx: Pointer to the context resource
* *
* This function puts all resources that were previously bound to @ctx on * This function puts all resources that were previously bound to @ctx on the
* the resource validation list. This is part of the context state reemission * resource validation list. This is part of the context state reemission
*/ */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv, static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
...@@ -470,7 +474,6 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, ...@@ -470,7 +474,6 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
} }
} }
/* Add all resources bound to the context to the validation list */ /* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex); mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx); binding_list = vmw_context_binding_list(ctx);
...@@ -504,8 +507,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, ...@@ -504,8 +507,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* *
* @list: Pointer to head of relocation list. * @list: Pointer to head of relocation list.
* @res: The resource. * @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the * @offset: Offset into the command buffer currently being parsed where the id
* id that needs fixup is located. Granularity is one byte. * that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type. * @rel_type: Relocation type.
*/ */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
...@@ -538,17 +541,15 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, ...@@ -538,17 +541,15 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
static void vmw_resource_relocations_free(struct list_head *list) static void vmw_resource_relocations_free(struct list_head *list)
{ {
/* Memory is validation context memory, so no need to free it */ /* Memory is validation context memory, so no need to free it */
INIT_LIST_HEAD(list); INIT_LIST_HEAD(list);
} }
/** /**
* vmw_resource_relocations_apply - Apply all relocations on a list * vmw_resource_relocations_apply - Apply all relocations on a list
* *
* @cb: Pointer to the start of the command buffer bein patch. This need * @cb: Pointer to the start of the command buffer bein patch. This need not be
* not be the same buffer as the one being parsed when the relocation * the same buffer as the one being parsed when the relocation list was built,
* list was built, but the contents must be the same modulo the * but the contents must be the same modulo the resource ids.
* resource ids.
* @list: Pointer to the head of the relocation list. * @list: Pointer to the head of the relocation list.
*/ */
static void vmw_resource_relocations_apply(uint32_t *cb, static void vmw_resource_relocations_apply(uint32_t *cb,
...@@ -592,14 +593,14 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv, ...@@ -592,14 +593,14 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
} }
/** /**
* vmw_resources_reserve - Reserve all resources on the sw_context's * vmw_resources_reserve - Reserve all resources on the sw_context's resource
* resource list. * list.
* *
* @sw_context: Pointer to the software context. * @sw_context: Pointer to the software context.
* *
* Note that since vmware's command submission currently is protected by * Note that since vmware's command submission currently is protected by the
* the cmdbuf mutex, no fancy deadlock avoidance is required for resources, * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
* since only a single thread at once will attempt this. * only a single thread at once will attempt this.
*/ */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context) static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{ {
...@@ -624,18 +625,18 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) ...@@ -624,18 +625,18 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
} }
/** /**
* vmw_cmd_res_check - Check that a resource is present and if so, put it * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
* on the resource validate list unless it's already there. * resource validate list unless it's already there.
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context. * @sw_context: Pointer to the software context.
* @res_type: Resource type. * @res_type: Resource type.
* @dirty: Whether to change dirty status. * @dirty: Whether to change dirty status.
* @converter: User-space visisble type specific information. * @converter: User-space visisble type specific information.
* @id_loc: Pointer to the location in the command buffer currently being * @id_loc: Pointer to the location in the command buffer currently being parsed
* parsed from where the user-space resource id handle is located. * from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validalidation node. Populated * @p_val: Pointer to pointer to resource validalidation node. Populated on
* on exit. * exit.
*/ */
static int static int
vmw_cmd_res_check(struct vmw_private *dev_priv, vmw_cmd_res_check(struct vmw_private *dev_priv,
...@@ -736,8 +737,8 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) ...@@ -736,8 +737,8 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
} }
/** /**
* vmw_rebind_contexts - Rebind all resources previously bound to * vmw_rebind_contexts - Rebind all resources previously bound to referenced
* referenced contexts. * contexts.
* *
* @sw_context: Pointer to the software context. * @sw_context: Pointer to the software context.
* *
...@@ -767,8 +768,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) ...@@ -767,8 +768,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
} }
/** /**
* vmw_view_bindings_add - Add an array of view bindings to a context * vmw_view_bindings_add - Add an array of view bindings to a context binding
* binding state tracker. * state tracker.
* *
* @sw_context: The execbuf state used for this command. * @sw_context: The execbuf state used for this command.
* @view_type: View type for the bindings. * @view_type: View type for the bindings.
...@@ -840,6 +841,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, ...@@ -840,6 +841,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
/** /**
* vmw_execbuf_info_from_res - Get the private validation metadata for a * vmw_execbuf_info_from_res - Get the private validation metadata for a
* recently validated resource * recently validated resource
*
* @sw_context: Pointer to the command submission context * @sw_context: Pointer to the command submission context
* @res: The resource * @res: The resource
* *
...@@ -847,8 +849,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, ...@@ -847,8 +849,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
* context's resource cache and hence the last resource of that type to be * context's resource cache and hence the last resource of that type to be
* processed by the validation code. * processed by the validation code.
* *
* Return: a pointer to the private metadata of the resource, or NULL * Return: a pointer to the private metadata of the resource, or NULL if it
* if it wasn't found * wasn't found
*/ */
static struct vmw_ctx_validation_info * static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
...@@ -864,7 +866,6 @@ vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, ...@@ -864,7 +866,6 @@ vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
return NULL; return NULL;
} }
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
...@@ -983,6 +984,7 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, ...@@ -983,6 +984,7 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
&cmd->body.src.sid, NULL); &cmd->body.src.sid, NULL);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_SET, user_surface_converter, VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL); &cmd->body.dest.sid, NULL);
...@@ -1019,11 +1021,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, ...@@ -1019,11 +1021,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* @new_query_bo: The new buffer holding query results. * @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
* *
* This function checks whether @new_query_bo is suitable for holding * This function checks whether @new_query_bo is suitable for holding query
* query results, and if another buffer currently is pinned for query * results, and if another buffer currently is pinned for query results. If so,
* results. If so, the function prepares the state of @sw_context for * the function prepares the state of @sw_context for switching pinned buffers
* switching pinned buffers after successful submission of the current * after successful submission of the current command batch.
* command batch.
*/ */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct vmw_buffer_object *new_query_bo, struct vmw_buffer_object *new_query_bo,
...@@ -1058,13 +1059,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, ...@@ -1058,13 +1059,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
dev_priv->has_mob, false); dev_priv->has_mob, false);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
} }
return 0; return 0;
} }
/** /**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
* *
...@@ -1073,11 +1072,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, ...@@ -1073,11 +1072,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
* *
* This function will check if we're switching query buffers, and will then, * This function will check if we're switching query buffers, and will then,
* issue a dummy occlusion query wait used as a query barrier. When the fence * issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all * object following that query wait has signaled, we are sure that all preceding
* preceding queries have finished, and the old query buffer can be unpinned. * queries have finished, and the old query buffer can be unpinned. However,
* However, since both the new query buffer and the old one are fenced with * since both the new query buffer and the old one are fenced with that fence,
* that fence, we can do an asynchronus unpin now, and be sure that the * we can do an asynchronus unpin now, and be sure that the old query buffer
* old query buffer won't be moved until the fence has signaled. * won't be moved until the fence has signaled.
* *
* As mentioned above, both the new - and old query buffers need to be fenced * As mentioned above, both the new - and old query buffers need to be fenced
* using a sequence emitted *after* calling this function. * using a sequence emitted *after* calling this function.
...@@ -1089,7 +1088,6 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1089,7 +1088,6 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* The validate list should still hold references to all * The validate list should still hold references to all
* contexts here. * contexts here.
*/ */
if (sw_context->needs_post_query_barrier) { if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry = struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context]; &sw_context->res_cache[vmw_res_context];
...@@ -1116,10 +1114,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1116,10 +1114,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
/* /*
* We pin also the dummy_query_bo buffer so that we * We pin also the dummy_query_bo buffer so that we
* don't need to validate it when emitting * don't need to validate it when emitting dummy queries
* dummy queries in context destroy paths. * in context destroy paths.
*/ */
if (!dev_priv->dummy_query_bo_pinned) { if (!dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo, vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
true); true);
...@@ -1136,22 +1133,24 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, ...@@ -1136,22 +1133,24 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
} }
/** /**
* vmw_translate_mob_pointer - Prepare to translate a user-space buffer * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
* handle to a MOB id. * to a MOB id.
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation. * @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated. * @id: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry * @vmw_bo_p: Points to a location that, on successful return will carry a
* a non-reference-counted pointer to the buffer object identified by the * non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id. * user-space handle in @id.
* *
* This function saves information needed to translate a user-space buffer * This function saves information needed to translate a user-space buffer
* handle to a MOB id. The translation does not take place immediately, but * handle to a MOB id. The translation does not take place immediately, but
* during a call to vmw_apply_relocations(). This function builds a relocation * during a call to vmw_apply_relocations().
* list and a list of buffers to validate. The former needs to be freed using *
* either vmw_apply_relocations() or vmw_free_relocations(). The latter * This function builds a relocation list and a list of buffers to validate. The
* needs to be freed using vmw_clear_validations. * former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
* vmw_clear_validations.
*/ */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
...@@ -1189,19 +1188,20 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ...@@ -1189,19 +1188,20 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
} }
/** /**
* vmw_translate_guest_pointer - Prepare to translate a user-space buffer * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
* handle to a valid SVGAGuestPtr * to a valid SVGAGuestPtr
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation. * @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated. * @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry * @vmw_bo_p: Points to a location that, on successful return will carry a
* a non-reference-counted pointer to the DMA buffer identified by the * non-reference-counted pointer to the DMA buffer identified by the user-space
* user-space handle in @id. * handle in @id.
* *
* This function saves information needed to translate a user-space buffer * This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place * handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations(). * immediately, but during a call to vmw_apply_relocations().
*
* This function builds a relocation list and a list of buffers to validate. * This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or * The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using * vmw_free_relocations(). The latter needs to be freed using
...@@ -1241,10 +1241,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ...@@ -1241,10 +1241,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0; return 0;
} }
/** /**
* vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command. * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1276,19 +1274,16 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, ...@@ -1276,19 +1274,16 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
return ret; return ret;
} }
/** /**
* vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command. * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream. * @header: Pointer to the command header in the command stream.
* *
* The query bind operation will eventually associate the query ID * The query bind operation will eventually associate the query ID with its
* with its backing MOB. In this function, we take the user mode * backing MOB. In this function, we take the user mode MOB ID and use
* MOB ID and use vmw_translate_mob_ptr() to translate it to its * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
* kernel mode equivalent.
*/ */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
...@@ -1315,10 +1310,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, ...@@ -1315,10 +1310,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return 0; return 0;
} }
/** /**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1337,7 +1330,7 @@ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, ...@@ -1337,7 +1330,7 @@ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1370,7 +1363,7 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv, ...@@ -1370,7 +1363,7 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1389,8 +1382,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, ...@@ -1389,8 +1382,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&cmd->body.mobid,
&vmw_bo); &vmw_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1401,7 +1393,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, ...@@ -1401,7 +1393,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1437,8 +1429,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, ...@@ -1437,8 +1429,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret; return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context, ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.guestResult, &cmd->body.guestResult, &vmw_bo);
&vmw_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1448,7 +1439,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv, ...@@ -1448,7 +1439,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1467,8 +1458,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, ...@@ -1467,8 +1458,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
ret = vmw_translate_mob_ptr(dev_priv, sw_context, ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&cmd->body.mobid,
&vmw_bo); &vmw_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1477,7 +1467,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, ...@@ -1477,7 +1467,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission. * @sw_context: The software context used for this command submission.
...@@ -1513,8 +1503,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, ...@@ -1513,8 +1503,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret; return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context, ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.guestResult, &cmd->body.guestResult, &vmw_bo);
&vmw_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1544,8 +1533,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1544,8 +1533,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
} }
ret = vmw_translate_guest_ptr(dev_priv, sw_context, ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.guest.ptr, &cmd->body.guest.ptr, &vmw_bo);
&vmw_bo);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
...@@ -1573,8 +1561,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, ...@@ -1573,8 +1561,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
header);
return 0; return 0;
} }
...@@ -1631,7 +1618,6 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv, ...@@ -1631,7 +1618,6 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
return 0; return 0;
} }
static int vmw_cmd_tex_state(struct vmw_private *dev_priv, static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header) SVGA3dCmdHeader *header)
...@@ -1701,12 +1687,10 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, ...@@ -1701,12 +1687,10 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body; SVGAFifoCmdDefineGMRFB body;
} *cmd = buf; } *cmd = buf;
return vmw_translate_guest_ptr(dev_priv, sw_context, return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
&cmd->body.ptr,
&vmw_bo); &vmw_bo);
} }
/** /**
* vmw_cmd_res_switch_backup - Utility function to handle backup buffer * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
* switching * switching
...@@ -1718,14 +1702,13 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, ...@@ -1718,14 +1702,13 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* stream. * stream.
* @backup_offset: Offset of backup into MOB. * @backup_offset: Offset of backup into MOB.
* *
* This function prepares for registering a switch of backup buffers * This function prepares for registering a switch of backup buffers in the
* in the resource metadata just prior to unreserving. It's basically a wrapper * resource metadata just prior to unreserving. It's basically a wrapper around
* around vmw_cmd_res_switch_backup with a different interface. * vmw_cmd_res_switch_backup with a different interface.
*/ */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
struct vmw_resource *res, struct vmw_resource *res, uint32_t *buf_id,
uint32_t *buf_id,
unsigned long backup_offset) unsigned long backup_offset)
{ {
struct vmw_buffer_object *vbo; struct vmw_buffer_object *vbo;
...@@ -1745,7 +1728,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, ...@@ -1745,7 +1728,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
return 0; return 0;
} }
/** /**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
* *
...@@ -1758,17 +1740,15 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, ...@@ -1758,17 +1740,15 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
* stream. * stream.
* @backup_offset: Offset of backup into MOB. * @backup_offset: Offset of backup into MOB.
* *
* This function prepares for registering a switch of backup buffers * This function prepares for registering a switch of backup buffers in the
* in the resource metadata just prior to unreserving. It's basically a wrapper * resource metadata just prior to unreserving. It's basically a wrapper around
* around vmw_cmd_res_switch_backup with a different interface. * vmw_cmd_res_switch_backup with a different interface.
*/ */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
enum vmw_res_type res_type, enum vmw_res_type res_type,
const struct vmw_user_resource_conv const struct vmw_user_resource_conv
*converter, *converter, uint32_t *res_id, uint32_t *buf_id,
uint32_t *res_id,
uint32_t *buf_id,
unsigned long backup_offset) unsigned long backup_offset)
{ {
struct vmw_resource *res; struct vmw_resource *res;
...@@ -1779,13 +1759,12 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, ...@@ -1779,13 +1759,12 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
if (ret) if (ret)
return ret; return ret;
return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
buf_id, backup_offset); backup_offset);
} }
/** /**
* vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1799,14 +1778,12 @@ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, ...@@ -1799,14 +1778,12 @@ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
container_of(header, typeof(*cmd), header); container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, user_surface_converter, &cmd->body.sid,
&cmd->body.sid, &cmd->body.mobid, &cmd->body.mobid, 0);
0);
} }
/** /**
* vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1825,8 +1802,7 @@ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, ...@@ -1825,8 +1802,7 @@ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1845,8 +1821,7 @@ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, ...@@ -1845,8 +1821,7 @@ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1865,7 +1840,7 @@ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, ...@@ -1865,7 +1840,7 @@ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
* command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
...@@ -1885,7 +1860,7 @@ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, ...@@ -1885,7 +1860,7 @@ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
* command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
...@@ -1905,8 +1880,8 @@ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, ...@@ -1905,8 +1880,8 @@ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_invalidate_gb_surface - Validate an * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
* SVGA_3D_CMD_INVALIDATE_GB_SURFACE command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1924,10 +1899,8 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, ...@@ -1924,10 +1899,8 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
&cmd->body.sid, NULL); &cmd->body.sid, NULL);
} }
/** /**
* vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1954,24 +1927,20 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, ...@@ -1954,24 +1927,20 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0; return 0;
size = cmd->header.size - sizeof(cmd->body); size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv, ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
vmw_context_res_man(ctx), cmd->body.shid, cmd + 1, cmd->body.type,
cmd->body.shid, cmd + 1, size, &sw_context->staged_cmd_res);
cmd->body.type, size,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_resource_relocation_add(sw_context, return vmw_resource_relocation_add(sw_context, NULL,
NULL,
vmw_ptr_diff(sw_context->buf_start, vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id), &cmd->header.id),
vmw_res_rel_nop); vmw_res_rel_nop);
} }
/** /**
* vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -1996,23 +1965,19 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, ...@@ -1996,23 +1965,19 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
if (unlikely(!dev_priv->has_mob)) if (unlikely(!dev_priv->has_mob))
return 0; return 0;
ret = vmw_shader_remove(vmw_context_res_man(ctx), ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
cmd->body.shid, cmd->body.type, &sw_context->staged_cmd_res);
cmd->body.type,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_resource_relocation_add(sw_context, return vmw_resource_relocation_add(sw_context, NULL,
NULL,
vmw_ptr_diff(sw_context->buf_start, vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id), &cmd->header.id),
vmw_res_rel_nop); vmw_res_rel_nop);
} }
/** /**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2047,9 +2012,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, ...@@ -2047,9 +2012,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (cmd->body.shid != SVGA3D_INVALID_ID) { if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(vmw_context_res_man(ctx), res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, cmd->body.shid, cmd->body.type);
cmd->body.type);
if (!IS_ERR(res)) { if (!IS_ERR(res)) {
ret = vmw_execbuf_res_noctx_val_add(sw_context, res, ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
VMW_RES_DIRTY_NONE); VMW_RES_DIRTY_NONE);
...@@ -2059,10 +2022,10 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, ...@@ -2059,10 +2022,10 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
} }
if (IS_ERR_OR_NULL(res)) { if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
vmw_res_shader, VMW_RES_DIRTY_NONE, VMW_RES_DIRTY_NONE,
user_shader_converter, user_shader_converter, &cmd->body.shid,
&cmd->body.shid, &res); &res);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
} }
...@@ -2075,14 +2038,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, ...@@ -2075,14 +2038,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
binding.bi.res = res; binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader; binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_info->staged, &binding.bi, vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
binding.shader_slot, 0);
return 0; return 0;
} }
/** /**
* vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2110,8 +2072,7 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, ...@@ -2110,8 +2072,7 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2125,13 +2086,12 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, ...@@ -2125,13 +2086,12 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
container_of(header, typeof(*cmd), header); container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
user_shader_converter, user_shader_converter, &cmd->body.shid,
&cmd->body.shid, &cmd->body.mobid, &cmd->body.mobid, cmd->body.offsetInBytes);
cmd->body.offsetInBytes);
} }
/** /**
* vmw_cmd_dx_set_single_constant_buffer - Validate an * vmw_cmd_dx_set_single_constant_buffer - Validate
* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
...@@ -2175,15 +2135,15 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, ...@@ -2175,15 +2135,15 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
return -EINVAL; return -EINVAL;
} }
vmw_binding_add(ctx_node->staged, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
binding.shader_slot, binding.slot); binding.slot);
return 0; return 0;
} }
/** /**
* vmw_cmd_dx_set_shader_res - Validate an * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2213,8 +2173,7 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, ...@@ -2213,8 +2173,7 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2259,15 +2218,14 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, ...@@ -2259,15 +2218,14 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
binding.bi.bt = vmw_ctx_binding_dx_shader; binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
vmw_binding_add(ctx_node->staged, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
binding.shader_slot, 0);
return 0; return 0;
} }
/** /**
* vmw_cmd_dx_set_vertex_buffers - Validates an * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2314,15 +2272,14 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, ...@@ -2314,15 +2272,14 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
binding.stride = cmd->buf[i].stride; binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer; binding.slot = i + cmd->body.startBuffer;
vmw_binding_add(ctx_node->staged, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
0, binding.slot);
} }
return 0; return 0;
} }
/** /**
* vmw_cmd_dx_ia_set_vertex_buffers - Validate an * vmw_cmd_dx_ia_set_vertex_buffers - Validate
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
...@@ -2361,8 +2318,8 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, ...@@ -2361,8 +2318,8 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_set_rendertarget - Validate an * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
* SVGA_3D_CMD_DX_SET_RENDERTARGETS command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2383,19 +2340,18 @@ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, ...@@ -2383,19 +2340,18 @@ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
return -EINVAL; return -EINVAL;
} }
ret = vmw_view_bindings_add(sw_context, vmw_view_ds, ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
vmw_ctx_binding_ds, 0, 0, &cmd->body.depthStencilViewId, 1, 0);
&cmd->body.depthStencilViewId, 1, 0);
if (ret) if (ret)
return ret; return ret;
return vmw_view_bindings_add(sw_context, vmw_view_rt, return vmw_view_bindings_add(sw_context, vmw_view_rt,
vmw_ctx_binding_dx_rt, 0, vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
(void *)&cmd[1], num_rt_view, 0); num_rt_view, 0);
} }
/** /**
* vmw_cmd_dx_clear_rendertarget_view - Validate an * vmw_cmd_dx_clear_rendertarget_view - Validate
* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
...@@ -2414,7 +2370,7 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, ...@@ -2414,7 +2370,7 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
} }
/** /**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an * vmw_cmd_dx_clear_depthstencil_view - Validate
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
...@@ -2442,8 +2398,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, ...@@ -2442,8 +2398,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
enum vmw_view_type view_type; enum vmw_view_type view_type;
int ret; int ret;
/* /*
* This is based on the fact that all affected define commands have * This is based on the fact that all affected define commands have the
* the same initial command body layout. * same initial command body layout.
*/ */
struct { struct {
SVGA3dCmdHeader header; SVGA3dCmdHeader header;
...@@ -2457,6 +2413,7 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, ...@@ -2457,6 +2413,7 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
view_type = vmw_view_cmd_to_type(header->id); view_type = vmw_view_cmd_to_type(header->id);
if (view_type == vmw_view_max) if (view_type == vmw_view_max)
return -EINVAL; return -EINVAL;
cmd = container_of(header, typeof(*cmd), header); cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter, VMW_RES_DIRTY_NONE, user_surface_converter,
...@@ -2469,19 +2426,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, ...@@ -2469,19 +2426,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
return vmw_view_add(sw_context->man, return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
ctx_node->ctx, cmd->defined_id, header,
srf,
view_type,
cmd->defined_id,
header,
header->size + sizeof(*header), header->size + sizeof(*header),
&sw_context->staged_cmd_res); &sw_context->staged_cmd_res);
} }
/** /**
* vmw_cmd_dx_set_so_targets - Validate an * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
* SVGA_3D_CMD_DX_SET_SOTARGETS command.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2505,8 +2457,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, ...@@ -2505,8 +2457,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
return -EINVAL; return -EINVAL;
cmd = container_of(header, typeof(*cmd), header); cmd = container_of(header, typeof(*cmd), header);
num = (cmd->header.size - sizeof(cmd->body)) / num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
sizeof(SVGA3dSoTarget);
if (num > SVGA3D_DX_MAX_SOTARGETS) { if (num > SVGA3D_DX_MAX_SOTARGETS) {
VMW_DEBUG_USER("Invalid DX SO binding.\n"); VMW_DEBUG_USER("Invalid DX SO binding.\n");
...@@ -2528,8 +2479,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, ...@@ -2528,8 +2479,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.size = cmd->targets[i].sizeInBytes; binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i; binding.slot = i;
vmw_binding_add(ctx_node->staged, &binding.bi, vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
0, binding.slot);
} }
return 0; return 0;
...@@ -2564,8 +2514,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, ...@@ -2564,8 +2514,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_check_subresource - Validate an * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
* SVGA_3D_CMD_DX_[X]_SUBRESOURCE command * command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2612,15 +2562,15 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, ...@@ -2612,15 +2562,15 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_view_remove - validate a view remove command and * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
* schedule the view resource for removal. * resource for removal.
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream. * @header: Pointer to the command header in the command stream.
* *
* Check that the view exists, and if it was not created using this * Check that the view exists, and if it was not created using this command
* command batch, conditionally make this command a NOP. * batch, conditionally make this command a NOP.
*/ */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
...@@ -2638,10 +2588,8 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, ...@@ -2638,10 +2588,8 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
if (!ctx_node) if (!ctx_node)
return -EINVAL; return -EINVAL;
ret = vmw_view_remove(sw_context->man, ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
cmd->body.view_id, view_type, &sw_context->staged_cmd_res, &view);
&sw_context->staged_cmd_res,
&view);
if (ret || !view) if (ret || !view)
return ret; return ret;
...@@ -2651,16 +2599,14 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, ...@@ -2651,16 +2599,14 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid * relocation to conditionally make this command a NOP to avoid
* device errors. * device errors.
*/ */
return vmw_resource_relocation_add(sw_context, return vmw_resource_relocation_add(sw_context, view,
view,
vmw_ptr_diff(sw_context->buf_start, vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id), &cmd->header.id),
vmw_res_rel_cond_nop); vmw_res_rel_cond_nop);
} }
/** /**
* vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2690,8 +2636,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, ...@@ -2690,8 +2636,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2716,8 +2661,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, ...@@ -2716,8 +2661,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
* command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2750,8 +2694,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, ...@@ -2750,8 +2694,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
ctx = ctx_node->ctx; ctx = ctx_node->ctx;
} }
res = vmw_shader_lookup(vmw_context_res_man(ctx), res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
cmd->body.shid, 0);
if (IS_ERR(res)) { if (IS_ERR(res)) {
VMW_DEBUG_USER("Could not find shader to bind.\n"); VMW_DEBUG_USER("Could not find shader to bind.\n");
return PTR_ERR(res); return PTR_ERR(res);
...@@ -2770,7 +2713,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, ...@@ -2770,7 +2713,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2788,8 +2731,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, ...@@ -2788,8 +2731,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_dx_transfer_from_buffer - * vmw_cmd_dx_transfer_from_buffer - Validate
* Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2815,8 +2758,7 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, ...@@ -2815,8 +2758,7 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
} }
/** /**
* vmw_cmd_intra_surface_copy - * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
* Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
* *
* @dev_priv: Pointer to a device private struct. * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. * @sw_context: The software context being used for this batch.
...@@ -2837,7 +2779,6 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, ...@@ -2837,7 +2779,6 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
&cmd->body.surface.sid, NULL); &cmd->body.surface.sid, NULL);
} }
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context,
void *buf, uint32_t *size) void *buf, uint32_t *size)
...@@ -3059,9 +3000,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { ...@@ -3059,9 +3000,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
false, false, true), false, false, true),
/* /* SM commands */
* DX commands
*/
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
false, false, true), false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
...@@ -3243,8 +3182,8 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) ...@@ -3243,8 +3182,8 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
} }
static int vmw_cmd_check(struct vmw_private *dev_priv, static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context, void *buf,
void *buf, uint32_t *size) uint32_t *size)
{ {
uint32_t cmd_id; uint32_t cmd_id;
uint32_t size_remaining = *size; uint32_t size_remaining = *size;
...@@ -3309,8 +3248,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, ...@@ -3309,8 +3248,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
} }
static int vmw_cmd_check_all(struct vmw_private *dev_priv, static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context, struct vmw_sw_context *sw_context, void *buf,
void *buf,
uint32_t size) uint32_t size)
{ {
int32_t cur_size = size; int32_t cur_size = size;
...@@ -3338,7 +3276,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, ...@@ -3338,7 +3276,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context) static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{ {
/* Memory is validation context memory, so no need to free it */ /* Memory is validation context memory, so no need to free it */
INIT_LIST_HEAD(&sw_context->bo_relocations); INIT_LIST_HEAD(&sw_context->bo_relocations);
} }
...@@ -3401,8 +3338,8 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, ...@@ -3401,8 +3338,8 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 * If this fails for some reason, We sync the fifo and return NULL. * If this fails for some reason, We sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer. * It is then safe to fence buffers with a NULL pointer.
* *
* If @p_handle is not NULL @file_priv must also not be NULL. Creates * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
* a userspace handle if @p_handle is not NULL, otherwise not. * userspace handle if @p_handle is not NULL, otherwise not.
*/ */
int vmw_execbuf_fence_commands(struct drm_file *file_priv, int vmw_execbuf_fence_commands(struct drm_file *file_priv,
...@@ -3430,9 +3367,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, ...@@ -3430,9 +3367,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fence_create(dev_priv->fman, sequence, p_fence); ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
if (unlikely(ret != 0 && !synced)) { if (unlikely(ret != 0 && !synced)) {
(void) vmw_fallback_wait(dev_priv, false, false, (void) vmw_fallback_wait(dev_priv, false, false, sequence,
sequence, false, false, VMW_FENCE_WAIT_TIMEOUT);
VMW_FENCE_WAIT_TIMEOUT);
*p_fence = NULL; *p_fence = NULL;
} }
...@@ -3440,36 +3376,32 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, ...@@ -3440,36 +3376,32 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
} }
/** /**
* vmw_execbuf_copy_fence_user - copy fence object information to * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
* user-space.
* *
* @dev_priv: Pointer to a vmw_private struct. * @dev_priv: Pointer to a vmw_private struct.
* @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
* @ret: Return value from fence object creation. * @ret: Return value from fence object creation.
* @user_fence_rep: User space address of a struct drm_vmw_fence_rep to * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
* which the information should be copied. * the information should be copied.
 * @fence: Pointer to the fence object. * @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle. * @fence_handle: User-space fence handle.
* @out_fence_fd: exported file descriptor for the fence. -1 if not used * @out_fence_fd: exported file descriptor for the fence. -1 if not used
* @sync_file: Only used to clean up in case of an error in this function. * @sync_file: Only used to clean up in case of an error in this function.
* *
* This function copies fence information to user-space. If copying fails, * This function copies fence information to user-space. If copying fails, the
* The user-space struct drm_vmw_fence_rep::error member is hopefully * user-space struct drm_vmw_fence_rep::error member is hopefully left
* left untouched, and if it's preloaded with an -EFAULT by user-space, * untouched, and if it's preloaded with an -EFAULT by user-space, the error
* the error will hopefully be detected. * will hopefully be detected.
* Also if copying fails, user-space will be unable to signal the fence *
* object so we wait for it immediately, and then unreference the * Also if copying fails, user-space will be unable to signal the fence object
* user-space reference. * so we wait for it immediately, and then unreference the user-space reference.
*/ */
void void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp, struct vmw_fpriv *vmw_fp, int ret,
int ret,
struct drm_vmw_fence_rep __user *user_fence_rep, struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence, struct vmw_fence_obj *fence, uint32_t fence_handle,
uint32_t fence_handle, int32_t out_fence_fd, struct sync_file *sync_file)
int32_t out_fence_fd,
struct sync_file *sync_file)
{ {
struct drm_vmw_fence_rep fence_rep; struct drm_vmw_fence_rep fence_rep;
...@@ -3490,16 +3422,16 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, ...@@ -3490,16 +3422,16 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
} }
/* /*
* copy_to_user errors will be detected by user space not * copy_to_user errors will be detected by user space not seeing
* seeing fence_rep::error filled in. Typically * fence_rep::error filled in. Typically user-space would have pre-set
* user-space would have pre-set that member to -EFAULT. * that member to -EFAULT.
*/ */
ret = copy_to_user(user_fence_rep, &fence_rep, ret = copy_to_user(user_fence_rep, &fence_rep,
sizeof(fence_rep)); sizeof(fence_rep));
/* /*
* User-space lost the fence object. We need to sync * User-space lost the fence object. We need to sync and unreference the
* and unreference the handle. * handle.
*/ */
if (unlikely(ret != 0) && (fence_rep.error == 0)) { if (unlikely(ret != 0) && (fence_rep.error == 0)) {
if (sync_file) if (sync_file)
...@@ -3510,8 +3442,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, ...@@ -3510,8 +3442,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1; fence_rep.fd = -1;
} }
ttm_ref_object_base_unref(vmw_fp->tfile, ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
fence_handle, TTM_REF_USAGE); TTM_REF_USAGE);
VMW_DEBUG_USER("Fence copy error. Syncing.\n"); VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false, (void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT); VMW_FENCE_WAIT_TIMEOUT);
...@@ -3519,20 +3451,18 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, ...@@ -3519,20 +3451,18 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
} }
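Editor's note: the pre-set -EFAULT convention described above is easiest to see
from the user-space side. Below is a hedged, illustrative sketch only; it is not
part of this patch. The ioctl wiring and the helper name are assumptions for the
example, while struct drm_vmw_execbuf_arg, struct drm_vmw_fence_rep and
DRM_VMW_EXECBUF come from the vmwgfx UAPI header.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

#define EXAMPLE_IOCTL_VMW_EXECBUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, struct drm_vmw_execbuf_arg)

/* Submit a command batch and report whether the fence info made it back. */
static int example_submit(int drm_fd, struct drm_vmw_execbuf_arg *arg,
			  struct drm_vmw_fence_rep *rep)
{
	memset(rep, 0, sizeof(*rep));
	rep->error = -EFAULT;	/* pre-set so a failed copy_to_user() is visible */
	arg->fence_rep = (uintptr_t)rep;

	if (ioctl(drm_fd, EXAMPLE_IOCTL_VMW_EXECBUF, arg))
		return -1;	/* the ioctl itself failed */

	/*
	 * If the kernel could not copy the fence information back, the
	 * pre-set -EFAULT survives and the fence handle must not be used.
	 */
	return rep->error;
}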
/** /**
* vmw_execbuf_submit_fifo - Patch a command batch and submit it using * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
* the fifo.
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* @kernel_commands: Pointer to the unpatched command batch. * @kernel_commands: Pointer to the unpatched command batch.
* @command_size: Size of the unpatched command batch. * @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists. * @sw_context: Structure holding the relocation lists.
* *
* Side effects: If this function returns 0, then the command batch * Side effects: If this function returns 0, then the command batch pointed to
* pointed to by @kernel_commands will have been modified. * by @kernel_commands will have been modified.
*/ */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
void *kernel_commands, void *kernel_commands, u32 command_size,
u32 command_size,
struct vmw_sw_context *sw_context) struct vmw_sw_context *sw_context)
{ {
void *cmd; void *cmd;
...@@ -3557,16 +3487,16 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, ...@@ -3557,16 +3487,16 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
} }
/** /**
* vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
* the command buffer manager. * command buffer manager.
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* @header: Opaque handle to the command buffer allocation. * @header: Opaque handle to the command buffer allocation.
* @command_size: Size of the unpatched command batch. * @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists. * @sw_context: Structure holding the relocation lists.
* *
* Side effects: If this function returns 0, then the command buffer * Side effects: If this function returns 0, then the command buffer represented
* represented by @header will have been modified. * by @header will have been modified.
*/ */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
struct vmw_cmdbuf_header *header, struct vmw_cmdbuf_header *header,
...@@ -3575,8 +3505,8 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, ...@@ -3575,8 +3505,8 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
{ {
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id : u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID); SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
id, false, header); header);
vmw_apply_relocations(sw_context); vmw_apply_relocations(sw_context);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
...@@ -3596,22 +3526,23 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, ...@@ -3596,22 +3526,23 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
* @header: Out parameter returning the opaque pointer to the command buffer. * @header: Out parameter returning the opaque pointer to the command buffer.
* *
* This function checks whether we can use the command buffer manager for * This function checks whether we can use the command buffer manager for
* submission and if so, creates a command buffer of suitable size and * submission and if so, creates a command buffer of suitable size and copies
* copies the user data into that buffer. * the user data into that buffer.
* *
* On successful return, the function returns a pointer to the data in the * On successful return, the function returns a pointer to the data in the
* command buffer and *@header is set to non-NULL. * command buffer and *@header is set to non-NULL.
* If command buffers could not be used, the function will return the value *
* of @kernel_commands on function call. That value may be NULL. In that case, * If command buffers could not be used, the function will return the value of
* the value of *@header will be set to NULL. * @kernel_commands on function call. That value may be NULL. In that case, the
* value of *@header will be set to NULL.
*
* If an error is encountered, the function will return a pointer error value. * If an error is encountered, the function will return a pointer error value.
* If the function is interrupted by a signal while sleeping, it will return * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value. * -ERESTARTSYS cast to a pointer error value.
*/ */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
void __user *user_commands, void __user *user_commands,
void *kernel_commands, void *kernel_commands, u32 command_size,
u32 command_size,
struct vmw_cmdbuf_header **header) struct vmw_cmdbuf_header **header)
{ {
size_t cmdbuf_size; size_t cmdbuf_size;
...@@ -3629,13 +3560,12 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, ...@@ -3629,13 +3560,12 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
/* If possible, add a little space for fencing. */ /* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512; cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE); cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
true, header); header);
if (IS_ERR(kernel_commands)) if (IS_ERR(kernel_commands))
return kernel_commands; return kernel_commands;
ret = copy_from_user(kernel_commands, user_commands, ret = copy_from_user(kernel_commands, user_commands, command_size);
command_size);
if (ret) { if (ret) {
VMW_DEBUG_USER("Failed copying commands.\n"); VMW_DEBUG_USER("Failed copying commands.\n");
vmw_cmdbuf_header_free(*header); vmw_cmdbuf_header_free(*header);
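Editor's note: a minimal, hedged sketch of how a caller consumes the pointer-error
convention described above, with -ERESTARTSYS coming back cast to a pointer and
unpacked via IS_ERR()/PTR_ERR(). The helper below is hypothetical (it assumes the
driver's internal headers for struct vmw_private and struct vmw_cmdbuf_header) and
is not a quotation of the patch.

#include <linux/err.h>
#include <linux/types.h>

static int example_fetch_commands(struct vmw_private *dev_priv,
				  void __user *user_commands,
				  void **kernel_commands, u32 command_size,
				  struct vmw_cmdbuf_header **header)
{
	void *cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					*kernel_commands, command_size,
					header);

	/* A negative errno, e.g. -ERESTARTSYS, is encoded in the pointer. */
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	/* *header stays NULL when the command buffer manager was not used. */
	*kernel_commands = cmds;
	return 0;
}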
...@@ -3683,14 +3613,11 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, ...@@ -3683,14 +3613,11 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
int vmw_execbuf_process(struct drm_file *file_priv, int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv, struct vmw_private *dev_priv,
void __user *user_commands, void __user *user_commands, void *kernel_commands,
void *kernel_commands, uint32_t command_size, uint64_t throttle_us,
uint32_t command_size,
uint64_t throttle_us,
uint32_t dx_context_handle, uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep, struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj **out_fence, struct vmw_fence_obj **out_fence, uint32_t flags)
uint32_t flags)
{ {
struct vmw_sw_context *sw_context = &dev_priv->ctx; struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL; struct vmw_fence_obj *fence = NULL;
...@@ -3739,18 +3666,18 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3739,18 +3666,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_unlock; goto out_unlock;
ret = copy_from_user(sw_context->cmd_bounce, user_commands,
ret = copy_from_user(sw_context->cmd_bounce, command_size);
user_commands, command_size);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
ret = -EFAULT; ret = -EFAULT;
VMW_DEBUG_USER("Failed copying commands.\n"); VMW_DEBUG_USER("Failed copying commands.\n");
goto out_unlock; goto out_unlock;
} }
kernel_commands = sw_context->cmd_bounce; kernel_commands = sw_context->cmd_bounce;
} else if (!header) } else if (!header) {
sw_context->kernel = true; sw_context->kernel = true;
}
sw_context->fp = vmw_fpriv(file_priv); sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list); INIT_LIST_HEAD(&sw_context->ctx_list);
...@@ -3763,6 +3690,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3763,6 +3690,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->res_relocations); INIT_LIST_HEAD(&sw_context->res_relocations);
INIT_LIST_HEAD(&sw_context->bo_relocations); INIT_LIST_HEAD(&sw_context->bo_relocations);
if (sw_context->staged_bindings) if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings); vmw_binding_state_reset(sw_context->staged_bindings);
...@@ -3770,8 +3698,10 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3770,8 +3698,10 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_unlock; goto out_unlock;
sw_context->res_ht_initialized = true; sw_context->res_ht_initialized = true;
} }
INIT_LIST_HEAD(&sw_context->staged_cmd_res); INIT_LIST_HEAD(&sw_context->staged_cmd_res);
sw_context->ctx = &val_ctx; sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
...@@ -3798,6 +3728,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3798,6 +3728,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = vmw_validation_res_validate(&val_ctx, true); ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err;
vmw_validation_drop_ht(&val_ctx); vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex); ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
...@@ -3825,15 +3756,13 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3825,15 +3756,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err; goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context); vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv, ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
&fence,
(user_fence_rep) ? &handle : NULL); (user_fence_rep) ? &handle : NULL);
/* /*
* This error is harmless, because if fence submission fails, * This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence will sync. The error will be propagated to * vmw_fifo_send_fence will sync. The error will be propagated to
* user-space in @fence_rep * user-space in @fence_rep
*/ */
if (ret != 0) if (ret != 0)
VMW_DEBUG_USER("Fence submission error. Syncing.\n"); VMW_DEBUG_USER("Fence submission error. Syncing.\n");
...@@ -3843,15 +3772,13 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3843,15 +3772,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_validation_bo_fence(sw_context->ctx, fence); vmw_validation_bo_fence(sw_context->ctx, fence);
if (unlikely(dev_priv->pinned_bo != NULL && if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence); __vmw_execbuf_release_pinned_bo(dev_priv, fence);
/* /*
* If anything fails here, give up trying to export the fence * If anything fails here, give up trying to export the fence and do a
* and do a sync since the user mode will not be able to sync * sync since the user mode will not be able to sync the fence itself.
* the fence itself. This ensures we are still functionally * This ensures we are still functionally correct.
* correct.
*/ */
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
...@@ -3870,8 +3797,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3870,8 +3797,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} }
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle, user_fence_rep, fence, handle, out_fence_fd,
out_fence_fd, sync_file); sync_file);
/* Don't unreference when handing fence out */ /* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) { if (unlikely(out_fence != NULL)) {
...@@ -3885,8 +3812,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3885,8 +3812,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
/* /*
* Unreference resources outside of the cmdbuf_mutex to * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
* avoid deadlocks in resource destruction paths. * in resource destruction paths.
*/ */
vmw_validation_unref_lists(&val_ctx); vmw_validation_unref_lists(&val_ctx);
...@@ -3901,8 +3828,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3901,8 +3828,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_validation_res_unreserve(&val_ctx, true); vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations); vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context); vmw_free_relocations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL && if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL); __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock: out_unlock:
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
...@@ -3911,8 +3837,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3911,8 +3837,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
/* /*
* Unreference resources outside of the cmdbuf_mutex to * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
* avoid deadlocks in resource destruction paths. * in resource destruction paths.
*/ */
vmw_validation_unref_lists(&val_ctx); vmw_validation_unref_lists(&val_ctx);
out_free_header: out_free_header:
...@@ -3930,9 +3856,9 @@ int vmw_execbuf_process(struct drm_file *file_priv, ...@@ -3930,9 +3856,9 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* *
* @dev_priv: The device private structure. * @dev_priv: The device private structure.
* *
* This function is called to idle the fifo and unpin the query buffer * This function is called to idle the fifo and unpin the query buffer if the
* if the normal way to do this hits an error, which should typically be * normal way to do this hits an error, which should typically be extremely
* extremely rare. * rare.
*/ */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{ {
...@@ -3948,28 +3874,27 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) ...@@ -3948,28 +3874,27 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/** /**
* __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
* query bo. * bo.
* *
* @dev_priv: The device private structure. * @dev_priv: The device private structure.
* @fence: If non-NULL should point to a struct vmw_fence_obj issued * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
* _after_ a query barrier that flushes all queries touching the current * query barrier that flushes all queries touching the current buffer pointed to
* buffer pointed to by @dev_priv->pinned_bo * by @dev_priv->pinned_bo
* *
* This function should be used to unpin the pinned query bo, or * This function should be used to unpin the pinned query bo, or as a query
* as a query barrier when we need to make sure that all queries have * barrier when we need to make sure that all queries have finished before the
* finished before the next fifo command. (For example on hardware * next fifo command. (For example on hardware context destructions where the
* context destructions where the hardware may otherwise leak unfinished * hardware may otherwise leak unfinished queries).
* queries).
* *
 * This function does not return any failure codes, but makes attempts * This function does not return any failure codes, but makes attempts to do safe
* to do safe unpinning in case of errors. * unpinning in case of errors.
* *
* The function will synchronize on the previous query barrier, and will * The function will synchronize on the previous query barrier, and will thus
* thus not finish until that barrier has executed. * not finish until that barrier has executed.
* *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
* before calling this function. * calling this function.
*/ */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence) struct vmw_fence_obj *fence)
...@@ -4019,35 +3944,32 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, ...@@ -4019,35 +3944,32 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
vmw_validation_unref_lists(&val_ctx); vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock: out_unlock:
return; return;
out_no_emit: out_no_emit:
vmw_validation_bo_backoff(&val_ctx); vmw_validation_bo_backoff(&val_ctx);
out_no_reserve: out_no_reserve:
vmw_validation_unref_lists(&val_ctx); vmw_validation_unref_lists(&val_ctx);
vmw_execbuf_unpin_panic(dev_priv); vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo); vmw_bo_unreference(&dev_priv->pinned_bo);
} }
/** /**
* vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
* query bo.
* *
* @dev_priv: The device private structure. * @dev_priv: The device private structure.
* *
* This function should be used to unpin the pinned query bo, or * This function should be used to unpin the pinned query bo, or as a query
* as a query barrier when we need to make sure that all queries have * barrier when we need to make sure that all queries have finished before the
* finished before the next fifo command. (For example on hardware * next fifo command. (For example on hardware context destructions where the
* context destructions where the hardware may otherwise leak unfinished * hardware may otherwise leak unfinished queries).
* queries).
* *
 * This function does not return any failure codes, but makes attempts * This function does not return any failure codes, but makes attempts to do safe
* to do safe unpinning in case of errors. * unpinning in case of errors.
* *
* The function will synchronize on the previous query barrier, and will * The function will synchronize on the previous query barrier, and will thus
* thus not finish until that barrier has executed. * not finish until that barrier has executed.
*/ */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{ {
...@@ -4078,12 +4000,9 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, ...@@ -4078,12 +4000,9 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
return -EFAULT; return -EFAULT;
/* /*
* Extend the ioctl argument while * Extend the ioctl argument while maintaining backwards compatibility:
* maintaining backwards compatibility: * We take different code paths depending on the value of arg.version.
* We take different code paths depending on the value of
* arg.version.
*/ */
if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION || if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
arg.version == 0)) { arg.version == 0)) {
VMW_DEBUG_USER("Incorrect execbuf version.\n"); VMW_DEBUG_USER("Incorrect execbuf version.\n");
...@@ -4093,8 +4012,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, ...@@ -4093,8 +4012,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
if (arg.version > 1 && if (arg.version > 1 &&
copy_from_user(&arg.context_handle, copy_from_user(&arg.context_handle,
(void __user *) (data + copy_offset[0]), (void __user *) (data + copy_offset[0]),
copy_offset[arg.version - 1] - copy_offset[arg.version - 1] - copy_offset[0]) != 0)
copy_offset[0]) != 0)
return -EFAULT; return -EFAULT;
switch (arg.version) { switch (arg.version) {
...@@ -4106,7 +4024,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, ...@@ -4106,7 +4024,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
break; break;
} }
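Editor's note: the backwards-compatibility scheme described above relies on newer
argument versions only appending members, so an old user-space argument is a
binary prefix of the new struct. The sketch below is a hedged, generic example of
that pattern; the struct and function names are invented for illustration and are
not the driver's actual definitions.

#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_arg {
	__u32 version;		/* members present since version 1 */
	__u32 command_size;
	__u32 context_handle;	/* appended in version 2 */
	__u32 pad;
};

static int example_copy_arg(struct example_arg *arg, const void __user *uarg)
{
	const size_t v1_size = offsetof(struct example_arg, context_handle);

	/* Every version starts with the v1 prefix. */
	if (copy_from_user(arg, uarg, v1_size))
		return -EFAULT;

	/* Fetch the appended tail only when user-space says it exists. */
	if (arg->version > 1 &&
	    copy_from_user(&arg->context_handle,
			   (const char __user *)uarg + v1_size,
			   sizeof(*arg) - v1_size))
		return -EFAULT;

	return 0;
}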
/* If imported a fence FD from elsewhere, then wait on it */ /* If imported a fence FD from elsewhere, then wait on it */
if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) { if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
in_fence = sync_file_get_fence(arg.imported_fence_fd); in_fence = sync_file_get_fence(arg.imported_fence_fd);
...@@ -4130,8 +4047,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, ...@@ -4130,8 +4047,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us, NULL, arg.command_size, arg.throttle_us,
arg.context_handle, arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep, (void __user *)(unsigned long)arg.fence_rep,
NULL, NULL, arg.flags);
arg.flags);
ttm_read_unlock(&dev_priv->reservation_sem); ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;