Commit 2724b2d5 authored by Thomas Hellstrom

drm/vmwgfx: Use new validation interface for the modesetting code v2

Strip the old KMS helpers and use the new validation interface also in
the modesetting code.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com> #v1
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 9c079b8c
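
The conversion is mechanical: every modesetting path that used to pair vmw_kms_helper_buffer_prepare()/vmw_kms_helper_buffer_finish() (or their _resource_ equivalents) now declares a validation context on the stack, stages its buffer or resource in that context, reserves and validates everything with a single vmw_validation_prepare() call, and fences and cleans up through the new vmw_kms_helper_validation_finish(). A minimal sketch of the buffer-object flow, assembled from the hunks below; the wrapper function and the submit step are hypothetical, while the helper names and argument values are taken from the diff:

static int example_bo_dirty(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            bool interruptible,
                            struct vmw_fence_obj **out_fence)
{
        /* On-stack validation context replaces struct vmw_validation_ctx. */
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        int ret;

        /* Stage the buffer object (not as a MOB, no CPU blit). */
        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
        if (ret)
                return ret;

        /* Reserve and validate everything staged so far. */
        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
        if (ret)
                goto out_unref;

        /* ... emit commands here, e.g. via vmw_kms_helper_dirty() ... */

        /* Fence the validated buffers, unreserve and drop the lists. */
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
                                         out_fence, NULL);
        return 0;

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}

The surface paths differ only in that the resource is staged with vmw_validation_add_resource() and vmw_validation_prepare() is handed the device's cmdbuf_mutex, replacing the locking that vmw_kms_helper_resource_prepare() used to do internally.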
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2557,88 +2557,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
- * command submission.
- *
- * @dev_priv. Pointer to a device private structure.
- * @buf: The buffer object
- * @interruptible: Whether to perform waits as interruptible.
- * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
- * The buffer will be validated as a GMR. Already pinned buffers will not be
- * validated.
- *
- * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
- * interrupted by a signal.
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
  */
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                  struct vmw_buffer_object *buf,
-                                  bool interruptible,
-                                  bool validate_as_mob,
-                                  bool for_cpu_blit)
-{
-        struct ttm_operation_ctx ctx = {
-                .interruptible = interruptible,
-                .no_wait_gpu = false};
-        struct ttm_buffer_object *bo = &buf->base;
-        int ret;
-
-        ttm_bo_reserve(bo, false, false, NULL);
-        if (for_cpu_blit)
-                ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
-        else
-                ret = vmw_validation_bo_validate_single(bo, interruptible,
-                                                        validate_as_mob);
-        if (ret)
-                ttm_bo_unreserve(bo);
-
-        return ret;
-}
-
-/**
- * vmw_kms_helper_buffer_revert - Undo the actions of
- * vmw_kms_helper_buffer_prepare.
- *
- * @res: Pointer to the buffer object.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_buffer_prepare.
- */
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
-{
-        if (buf)
-                ttm_bo_unreserve(&buf->base);
-}
-
-/**
- * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
- * kms command submission.
- *
- * @dev_priv: Pointer to a device private structure.
- * @file_priv: Pointer to a struct drm_file representing the caller's
- * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
- * if non-NULL, @user_fence_rep must be non-NULL.
- * @buf: The buffer object.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- * @user_fence_rep: Optional pointer to a user-space provided struct
- * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
- * function copies fence data to user-space in a fail-safe manner.
- */
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-                                  struct drm_file *file_priv,
-                                  struct vmw_buffer_object *buf,
-                                  struct vmw_fence_obj **out_fence,
-                                  struct drm_vmw_fence_rep __user *
-                                  user_fence_rep)
-{
-        struct vmw_fence_obj *fence;
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+                                      struct drm_file *file_priv,
+                                      struct vmw_validation_context *ctx,
+                                      struct vmw_fence_obj **out_fence,
+                                      struct drm_vmw_fence_rep __user *
+                                      user_fence_rep)
+{
+        struct vmw_fence_obj *fence = NULL;
         uint32_t handle;
         int ret;
 
-        ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
-                                         file_priv ? &handle : NULL);
-        if (buf)
-                vmw_bo_fence_single(&buf->base, fence);
+        if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+            out_fence)
+                ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+                                                 file_priv ? &handle : NULL);
+
+        vmw_validation_done(ctx, fence);
+
         if (file_priv)
                 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                             ret, user_fence_rep, fence,
@@ -2647,106 +2590,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                 *out_fence = fence;
         else
                 vmw_fence_obj_unreference(&fence);
-
-        vmw_kms_helper_buffer_revert(buf);
-}
-
-/**
- * vmw_kms_helper_resource_revert - Undo the actions of
- * vmw_kms_helper_resource_prepare.
- *
- * @res: Pointer to the resource. Typically a surface.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_resource_prepare.
- */
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
-{
-        struct vmw_resource *res = ctx->res;
-
-        vmw_kms_helper_buffer_revert(ctx->buf);
-        vmw_bo_unreference(&ctx->buf);
-        vmw_resource_unreserve(res, false, NULL, 0);
-        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
-
-/**
- * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
- * command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @interruptible: Whether to perform waits as interruptible.
- *
- * Reserves and validates also the backup buffer if a guest-backed resource.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
- * interrupted by a signal.
- */
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                    bool interruptible,
-                                    struct vmw_validation_ctx *ctx)
-{
-        int ret = 0;
-
-        ctx->buf = NULL;
-        ctx->res = res;
-
-        if (interruptible)
-                ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
-        else
-                mutex_lock(&res->dev_priv->cmdbuf_mutex);
-
-        if (unlikely(ret != 0))
-                return -ERESTARTSYS;
-
-        ret = vmw_resource_reserve(res, interruptible, false);
-        if (ret)
-                goto out_unlock;
-
-        if (res->backup) {
-                ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
-                                                    interruptible,
-                                                    res->dev_priv->has_mob,
-                                                    false);
-                if (ret)
-                        goto out_unreserve;
-
-                ctx->buf = vmw_bo_reference(res->backup);
-        }
-        ret = vmw_resource_validate(res, interruptible);
-        if (ret)
-                goto out_revert;
-
-        return 0;
-
-out_revert:
-        vmw_kms_helper_buffer_revert(ctx->buf);
-out_unreserve:
-        vmw_resource_unreserve(res, false, NULL, 0);
-out_unlock:
-        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-
-        return ret;
-}
-
-/**
- * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
- * kms command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- */
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-                                    struct vmw_fence_obj **out_fence)
-{
-        struct vmw_resource *res = ctx->res;
-
-        if (ctx->buf || out_fence)
-                vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
-                                             out_fence, NULL);
-
-        vmw_bo_unreference(&ctx->buf);
-        vmw_resource_unreserve(res, false, NULL, 0);
-        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
 
 /**
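
Note that the new helper only creates a fence when something will consume it: a user-space fence rep, a caller-supplied out_fence, or buffer objects tracked by the context (the vmw_validation_has_bos() test above). When out_fence is non-NULL, the caller receives a refcounted fence it must eventually drop. A hypothetical caller-side sketch:

        struct vmw_fence_obj *fence = NULL;

        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, &fence, NULL);
        if (fence) {
                /* e.g. wait for the screen update, then drop the reference */
                vmw_fence_obj_unreference(&fence);
        }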
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                          int increment,
                          struct vmw_kms_dirty *dirty);
 
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                  struct vmw_buffer_object *buf,
-                                  bool interruptible,
-                                  bool validate_as_mob,
-                                  bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-                                  struct drm_file *file_priv,
-                                  struct vmw_buffer_object *buf,
-                                  struct vmw_fence_obj **out_fence,
-                                  struct drm_vmw_fence_rep __user *
-                                  user_fence_rep);
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-                                    bool interruptible,
-                                    struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-                                    struct vmw_fence_obj **out_fence);
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+                                      struct drm_file *file_priv,
+                                      struct vmw_validation_context *ctx,
+                                      struct vmw_fence_obj **out_fence,
+                                      struct drm_vmw_fence_rep __user *
+                                      user_fence_rep);
 int vmw_kms_readback(struct vmw_private *dev_priv,
                      struct drm_file *file_priv,
                      struct vmw_framebuffer *vfb,
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
         struct vmw_framebuffer_surface *vfbs =
                 container_of(framebuffer, typeof(*vfbs), base);
         struct vmw_kms_sou_surface_dirty sdirty;
-        struct vmw_validation_ctx ctx;
+        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
         int ret;
 
         if (!srf)
                 srf = &vfbs->surface->res;
 
-        ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+        ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
         if (ret)
                 return ret;
 
+        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+        if (ret)
+                goto out_unref;
+
         sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
         sdirty.base.clip = vmw_sou_surface_clip;
         sdirty.base.dev_priv = dev_priv;
@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
         ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                    dest_x, dest_y, num_clips, inc,
                                    &sdirty.base);
-        vmw_kms_helper_resource_finish(&ctx, out_fence);
+        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+                                         NULL);
 
         return ret;
+
+out_unref:
+        vmw_validation_unref_lists(&val_ctx);
+        return ret;
 }
 
 /**
@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                 container_of(framebuffer, struct vmw_framebuffer_bo,
                              base)->buffer;
         struct vmw_kms_dirty dirty;
+        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
         int ret;
 
-        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-                                            false, false);
+        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
         if (ret)
                 return ret;
 
+        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+        if (ret)
+                goto out_unref;
+
         ret = do_bo_define_gmrfb(dev_priv, framebuffer);
         if (unlikely(ret != 0))
                 goto out_revert;
@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                 num_clips;
         ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                    0, 0, num_clips, increment, &dirty);
-        vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+                                         NULL);
 
         return ret;
 
 out_revert:
-        vmw_kms_helper_buffer_revert(buf);
+        vmw_validation_revert(&val_ctx);
+out_unref:
+        vmw_validation_unref_lists(&val_ctx);
 
         return ret;
 }
@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
         struct vmw_buffer_object *buf =
                 container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
         struct vmw_kms_dirty dirty;
+        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
         int ret;
 
-        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
-                                            false);
+        ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
         if (ret)
                 return ret;
 
+        ret = vmw_validation_prepare(&val_ctx, NULL, true);
+        if (ret)
+                goto out_unref;
+
         ret = do_bo_define_gmrfb(dev_priv, vfb);
         if (unlikely(ret != 0))
                 goto out_revert;
@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                 num_clips;
         ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
                                    0, 0, num_clips, 1, &dirty);
-        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-                                     user_fence_rep);
+        vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+                                         user_fence_rep);
 
         return ret;
 
 out_revert:
-        vmw_kms_helper_buffer_revert(buf);
+        vmw_validation_revert(&val_ctx);
+out_unref:
+        vmw_validation_unref_lists(&val_ctx);
 
         return ret;
 }
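
The GMRFB paths above also show the two-level unwind the new interface expects: an error before vmw_validation_prepare() succeeds needs only vmw_validation_unref_lists(), while an error after it must first undo the reservation and validation with vmw_validation_revert(). In sketch form, with the labels used in the hunks (submit_commands() is a hypothetical stand-in for do_bo_define_gmrfb() and vmw_kms_helper_dirty()):

        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
        if (ret)
                goto out_unref;         /* nothing reserved or validated yet */

        ret = submit_commands();        /* hypothetical stand-in */
        if (ret)
                goto out_revert;        /* reserved and validated: revert first */

        /* success: fence, unreserve and release the lists in one call */
        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, NULL);
        return 0;

out_revert:
        vmw_validation_revert(&val_ctx);
out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;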
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
         struct vmw_stdu_dirty ddirty;
         int ret;
         bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 
         /*
          * VMs without 3D support don't have the surface DMA command and
          * we'll be using a CPU blit, and the framebuffer should be moved out
          * of VRAM.
          */
-        ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-                                            false, cpu_blit);
+        ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
         if (ret)
                 return ret;
 
+        ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+        if (ret)
+                goto out_unref;
+
         ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
                 SVGA3D_READ_HOST_VRAM;
         ddirty.left = ddirty.top = S32_MAX;
@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
         ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
                                    0, 0, num_clips, increment, &ddirty.base);
-        vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-                                     user_fence_rep);
 
+        vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+                                         user_fence_rep);
+
+        return ret;
+
+out_unref:
+        vmw_validation_unref_lists(&val_ctx);
         return ret;
 }
@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
         struct vmw_framebuffer_surface *vfbs =
                 container_of(framebuffer, typeof(*vfbs), base);
         struct vmw_stdu_dirty sdirty;
-        struct vmw_validation_ctx ctx;
+        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
         int ret;
 
         if (!srf)
                 srf = &vfbs->surface->res;
 
-        ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+        ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
         if (ret)
                 return ret;
 
+        ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+        if (ret)
+                goto out_unref;
+
         if (vfbs->is_bo_proxy) {
                 ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
                 if (ret)
@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
                                    dest_x, dest_y, num_clips, inc,
                                    &sdirty.base);
 out_finish:
-        vmw_kms_helper_resource_finish(&ctx, out_fence);
+        vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+                                         NULL);
+
+        return ret;
 
+out_unref:
+        vmw_validation_unref_lists(&val_ctx);
         return ret;
 }
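
One caller-specific detail in the STDU DMA hunk above: hosts without 3D support lack the surface-DMA command, so the update is done with a CPU blit and the framebuffer has to be validated to a CPU-accessible placement outside VRAM. Judging by the old call it replaces, that request now travels through the last argument of vmw_validation_add_bo(), mirroring the former for_cpu_blit parameter (this mapping is inferred from the diff, not stated in it):

        bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);

        /* third argument: validate as MOB (no); fourth: allow a CPU blit */
        ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);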