Commit fbe86ca5 authored by Christian König

drm/vmwgfx: switch over to the new pin interface v2

Stop using TTM_PL_FLAG_NO_EVICT.

v2: fix unconditional pinning
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Link: https://patchwork.freedesktop.org/patch/391601/?series=81973&rev=1
parent b254557c
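For readers unfamiliar with the new interface: a buffer is no longer kept resident by or-ing TTM_PL_FLAG_NO_EVICT into its placement flags and revalidating; instead it is pinned explicitly with ttm_bo_pin()/ttm_bo_unpin(), and the pin depth is tracked in ttm_buffer_object::pin_count, which is why the driver-private vmw_buffer_object::pin_count is removed below. A minimal sketch of the new pattern, assuming a kernel build environment with the TTM pin helpers from earlier in this series; the helper name is made up for illustration:

#include <drm/ttm/ttm_bo_api.h>	/* ttm_bo_pin(), ttm_bo_unpin(), struct ttm_buffer_object */
#include <linux/dma-resv.h>	/* dma_resv_assert_held() */

/* Hypothetical helper: make a BO resident (or release it) the new way. */
static void example_set_resident(struct ttm_buffer_object *bo, bool resident)
{
	/* Pinning and unpinning require the BO to be reserved. */
	dma_resv_assert_held(bo->base.resv);

	if (resident)
		ttm_bo_pin(bo);		/* increments bo->pin_count; eviction is refused while it is > 0 */
	else
		ttm_bo_unpin(bo);	/* decrements bo->pin_count */
}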
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
         int ret = 0;
         /* Buffer objects need to be either pinned or reserved: */
-        if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+        if (!(dst->pin_count))
                 dma_resv_assert_held(dst->base.resv);
-        if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+        if (!(src->pin_count))
                 dma_resv_assert_held(src->base.resv);
         if (!ttm_tt_is_populated(dst->ttm)) {
@@ -106,7 +106,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
         if (unlikely(ret != 0))
                 goto err;
-        if (buf->pin_count > 0)
+        if (buf->base.pin_count > 0)
                 ret = ttm_bo_mem_compat(placement, &bo->mem,
                                         &new_flags) == true ? 0 : -EINVAL;
         else
@@ -155,7 +155,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
         if (unlikely(ret != 0))
                 goto err;
-        if (buf->pin_count > 0) {
+        if (buf->base.pin_count > 0) {
                 ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                         &new_flags) == true ? 0 : -EINVAL;
                 goto out_unreserve;
@@ -246,12 +246,12 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
         if (bo->mem.mem_type == TTM_PL_VRAM &&
             bo->mem.start < bo->num_pages &&
             bo->mem.start > 0 &&
-            buf->pin_count == 0) {
+            buf->base.pin_count == 0) {
                 ctx.interruptible = false;
                 (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
         }
-        if (buf->pin_count > 0)
+        if (buf->base.pin_count > 0)
                 ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                         &new_flags) == true ? 0 : -EINVAL;
         else
@@ -343,23 +343,13 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
         dma_resv_assert_held(bo->base.resv);
-        if (pin) {
-                if (vbo->pin_count++ > 0)
-                        return;
-        } else {
-                WARN_ON(vbo->pin_count <= 0);
-                if (--vbo->pin_count > 0)
-                        return;
-        }
+        if (pin == !!bo->pin_count)
+                return;
         pl.fpfn = 0;
         pl.lpfn = 0;
         pl.mem_type = bo->mem.mem_type;
         pl.flags = bo->mem.placement;
-        if (pin)
-                pl.flags |= TTM_PL_FLAG_NO_EVICT;
-        else
-                pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
         memset(&placement, 0, sizeof(placement));
         placement.num_placement = 1;
@@ -368,8 +358,12 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
                 ret = ttm_bo_validate(bo, &placement, &ctx);
                 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
         }
+        if (pin)
+                ttm_bo_pin(bo);
+        else
+                ttm_bo_unpin(bo);
 }
 /**
  * vmw_bo_map_and_cache - Map a buffer object and cache the map
@@ -539,6 +533,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
  * @size: Buffer object size in bytes.
  * @placement: Initial placement.
  * @interruptible: Whether waits should be performed interruptible.
+ * @pin: If the BO should be created pinned at a fixed location.
  * @bo_free: The buffer object destructor.
  * Returns: Zero on success, negative error code on error.
  *
@@ -547,9 +542,10 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 int vmw_bo_init(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *vmw_bo,
                 size_t size, struct ttm_placement *placement,
-                bool interruptible,
+                bool interruptible, bool pin,
                 void (*bo_free)(struct ttm_buffer_object *bo))
 {
+        struct ttm_operation_ctx ctx = { interruptible, false };
         struct ttm_bo_device *bdev = &dev_priv->bdev;
         size_t acc_size;
         int ret;
@@ -563,11 +559,16 @@ int vmw_bo_init(struct vmw_private *dev_priv,
         vmw_bo->base.priority = 3;
         vmw_bo->res_tree = RB_ROOT;
-        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-                          ttm_bo_type_device, placement,
-                          0, interruptible, acc_size,
-                          NULL, NULL, bo_free);
-        return ret;
+        ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
+                                   ttm_bo_type_device, placement,
+                                   0, &ctx, acc_size, NULL, NULL, bo_free);
+        if (unlikely(ret))
+                return ret;
+
+        if (pin)
+                ttm_bo_pin(&vmw_bo->base);
+
+        ttm_bo_unreserve(&vmw_bo->base);
+        return 0;
 }
@@ -656,7 +657,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
         ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                           (dev_priv->has_mob) ?
                           &vmw_sys_placement :
-                          &vmw_vram_sys_placement, true,
+                          &vmw_vram_sys_placement, true, false,
                           &vmw_user_bo_destroy);
         if (unlikely(ret != 0))
                 return ret;
@@ -410,8 +410,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
         if (!buf)
                 return -ENOMEM;
-        ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-                          true, vmw_bo_bo_free);
+        ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
+                          true, true, vmw_bo_bo_free);
         if (ret) {
                 DRM_ERROR("Failed initializing new cotable MOB.\n");
                 return ret;
@@ -372,7 +372,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
                 return -ENOMEM;
         ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
-                          &vmw_sys_ne_placement, false,
+                          &vmw_sys_placement, false, true,
                           &vmw_bo_bo_free);
         if (unlikely(ret != 0))
                 return ret;
@@ -99,7 +99,6 @@ struct vmw_fpriv {
  * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
  * @base: The TTM buffer object
  * @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @pin_count: pin depth
  * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
  * increased. May be decreased without reservation.
  * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -110,7 +109,6 @@ struct vmw_fpriv {
 struct vmw_buffer_object {
         struct ttm_buffer_object base;
         struct rb_root res_tree;
-        s32 pin_count;
         atomic_t cpu_writers;
         /* Not ref-counted. Protected by binding_mutex */
         struct vmw_resource *dx_query_ctx;
@@ -852,7 +850,7 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
 extern int vmw_bo_init(struct vmw_private *dev_priv,
                        struct vmw_buffer_object *vmw_bo,
                        size_t size, struct ttm_placement *placement,
-                       bool interruptible,
+                       bool interruptible, bool pin,
                        void (*bo_free)(struct ttm_buffer_object *bo));
 extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                                      struct ttm_object_file *tfile);
@@ -1009,15 +1007,12 @@ extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
 extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
 extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_placement vmw_nonfixed_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern const struct vmw_sg_table *
@@ -406,7 +406,7 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
         ret = vmw_bo_init(vmw_priv, vmw_bo, size,
                           &vmw_sys_placement,
-                          false,
+                          false, false,
                           &vmw_bo_bo_free);
         if (unlikely(ret != 0))
                 goto err_unlock; /* init frees the buffer on failure */
@@ -370,7 +370,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
         ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                           res->func->backup_placement,
-                          interruptible,
+                          interruptible, false,
                           &vmw_bo_bo_free);
         if (unlikely(ret != 0))
                 goto out_no_bo;
@@ -1002,7 +1002,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                 vbo = res->backup;
                 ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
-                if (!vbo->pin_count) {
+                if (!vbo->base.pin_count) {
                         ret = ttm_bo_validate
                                 (&vbo->base,
                                  res->func->backup_placement,
@@ -451,8 +451,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
          */
         vmw_overlay_pause_all(dev_priv);
         ret = vmw_bo_init(dev_priv, vps->bo, size,
-                          &vmw_vram_ne_placement,
-                          false, &vmw_bo_bo_free);
+                          &vmw_vram_placement,
+                          false, true, &vmw_bo_bo_free);
         vmw_overlay_resume_all(dev_priv);
         if (ret) {
                 vps->bo = NULL; /* vmw_bo_init frees on error */
@@ -978,8 +978,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
         if (unlikely(!buf))
                 return -ENOMEM;
-        ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
-                          true, vmw_bo_bo_free);
+        ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
+                          true, true, vmw_bo_bo_free);
         if (unlikely(ret != 0))
                 goto out;
@@ -37,13 +37,6 @@ static const struct ttm_place vram_placement_flags = {
         .flags = TTM_PL_FLAG_CACHED
 };
-static const struct ttm_place vram_ne_placement_flags = {
-        .fpfn = 0,
-        .lpfn = 0,
-        .mem_type = TTM_PL_VRAM,
-        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
 static const struct ttm_place sys_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
@@ -51,13 +44,6 @@ static const struct ttm_place sys_placement_flags = {
         .flags = TTM_PL_FLAG_CACHED
 };
-static const struct ttm_place sys_ne_placement_flags = {
-        .fpfn = 0,
-        .lpfn = 0,
-        .mem_type = TTM_PL_SYSTEM,
-        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
 static const struct ttm_place gmr_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
@@ -72,13 +58,6 @@ static const struct ttm_place mob_placement_flags = {
         .flags = TTM_PL_FLAG_CACHED
 };
-static const struct ttm_place mob_ne_placement_flags = {
-        .fpfn = 0,
-        .lpfn = 0,
-        .mem_type = VMW_PL_MOB,
-        .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
 struct ttm_placement vmw_vram_placement = {
         .num_placement = 1,
         .placement = &vram_placement_flags,
@@ -128,13 +107,6 @@ struct ttm_placement vmw_vram_sys_placement = {
         .busy_placement = &sys_placement_flags
 };
-struct ttm_placement vmw_vram_ne_placement = {
-        .num_placement = 1,
-        .placement = &vram_ne_placement_flags,
-        .num_busy_placement = 1,
-        .busy_placement = &vram_ne_placement_flags
-};
 struct ttm_placement vmw_sys_placement = {
         .num_placement = 1,
         .placement = &sys_placement_flags,
@@ -142,13 +114,6 @@ struct ttm_placement vmw_sys_placement = {
         .busy_placement = &sys_placement_flags
 };
-struct ttm_placement vmw_sys_ne_placement = {
-        .num_placement = 1,
-        .placement = &sys_ne_placement_flags,
-        .num_busy_placement = 1,
-        .busy_placement = &sys_ne_placement_flags
-};
 static const struct ttm_place evictable_placement_flags[] = {
         {
                 .fpfn = 0,
@@ -213,13 +178,6 @@ struct ttm_placement vmw_mob_placement = {
         .busy_placement = &mob_placement_flags
 };
-struct ttm_placement vmw_mob_ne_placement = {
-        .num_placement = 1,
-        .num_busy_placement = 1,
-        .placement = &mob_ne_placement_flags,
-        .busy_placement = &mob_ne_placement_flags
-};
 struct ttm_placement vmw_nonfixed_placement = {
         .num_placement = 3,
         .placement = nonfixed_placement_flags,
@@ -540,7 +540,7 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
         if (atomic_read(&vbo->cpu_writers))
                 return -EBUSY;
-        if (vbo->pin_count > 0)
+        if (vbo->base.pin_count > 0)
                 return 0;
         if (validate_as_mob)