Commit 76f9764c authored by Chris Wilson

drm/i915: Introduce a vma.kref

Start introducing a kref on i915_vma in order to protect the vma unbind
(i915_gem_object_unbind) from a parallel destruction (i915_vma_parked).
Later, we will use the refcount to manage all access and turn i915_vma
into a first class container.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Acked-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191222210256.2066451-2-chris@chris-wilson.co.uk
parent f5af1659
...@@ -194,7 +194,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, ...@@ -194,7 +194,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(vma->obj != obj); GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock); spin_unlock(&obj->vma.lock);
i915_vma_destroy(vma); __i915_vma_put(vma);
spin_lock(&obj->vma.lock); spin_lock(&obj->vma.lock);
} }
......
...@@ -1110,8 +1110,7 @@ static int __igt_write_huge(struct intel_context *ce, ...@@ -1110,8 +1110,7 @@ static int __igt_write_huge(struct intel_context *ce,
out_vma_unpin: out_vma_unpin:
i915_vma_unpin(vma); i915_vma_unpin(vma);
out_vma_close: out_vma_close:
i915_vma_destroy(vma); __i915_vma_put(vma);
return err; return err;
} }
......
...@@ -163,7 +163,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, ...@@ -163,7 +163,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p); kunmap(p);
out: out:
i915_vma_destroy(vma); __i915_vma_put(vma);
return err; return err;
} }
...@@ -257,7 +257,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, ...@@ -257,7 +257,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (err) if (err)
return err; return err;
i915_vma_destroy(vma); __i915_vma_put(vma);
if (igt_timeout(end_time, if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n", "%s: timed out after tiling=%d stride=%d\n",
......
...@@ -136,7 +136,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, ...@@ -136,7 +136,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct i915_vma, struct i915_vma,
obj_link))) { obj_link))) {
struct i915_address_space *vm = vma->vm; struct i915_address_space *vm = vma->vm;
bool awake = false;
list_move_tail(&vma->obj_link, &still_in_list); list_move_tail(&vma->obj_link, &still_in_list);
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
...@@ -147,26 +146,18 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, ...@@ -147,26 +146,18 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
break; break;
/* Prevent vma being freed by i915_vma_parked as we unbind */ /* Prevent vma being freed by i915_vma_parked as we unbind */
if (intel_gt_pm_get_if_awake(vm->gt)) { vma = __i915_vma_get(vma);
awake = true;
} else {
if (i915_vma_is_closed(vma)) {
spin_unlock(&obj->vma.lock);
i915_vma_parked(vm->gt);
goto err_vm;
}
}
spin_unlock(&obj->vma.lock); spin_unlock(&obj->vma.lock);
ret = -EBUSY; if (vma) {
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || ret = -EBUSY;
!i915_vma_is_active(vma)) if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
ret = i915_vma_unbind(vma); !i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
__i915_vma_put(vma);
}
if (awake)
intel_gt_pm_put(vm->gt);
err_vm:
i915_vm_close(vm); i915_vm_close(vm);
spin_lock(&obj->vma.lock); spin_lock(&obj->vma.lock);
} }
......
...@@ -534,7 +534,7 @@ void __i915_vm_close(struct i915_address_space *vm) ...@@ -534,7 +534,7 @@ void __i915_vm_close(struct i915_address_space *vm)
atomic_and(~I915_VMA_PIN_MASK, &vma->flags); atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma)); WARN_ON(__i915_vma_unbind(vma));
i915_vma_destroy(vma); __i915_vma_put(vma);
i915_gem_object_put(obj); i915_gem_object_put(obj);
} }
...@@ -1812,7 +1812,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) ...@@ -1812,7 +1812,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{ {
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
i915_vma_destroy(ppgtt->vma); __i915_vma_put(ppgtt->vma);
gen6_ppgtt_free_pd(ppgtt); gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm); free_scratch(vm);
...@@ -1895,6 +1895,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) ...@@ -1895,6 +1895,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
i915_active_init(&vma->active, NULL, NULL); i915_active_init(&vma->active, NULL, NULL);
kref_init(&vma->ref);
mutex_init(&vma->pages_mutex); mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(&ggtt->vm); vma->vm = i915_vm_get(&ggtt->vm);
vma->ops = &pd_vma_ops; vma->ops = &pd_vma_ops;
......
...@@ -114,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj, ...@@ -114,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL) if (vma == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
kref_init(&vma->ref);
mutex_init(&vma->pages_mutex); mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm); vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops; vma->ops = &vm->vma_ops;
...@@ -1021,8 +1022,10 @@ void i915_vma_reopen(struct i915_vma *vma) ...@@ -1021,8 +1022,10 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma); __i915_vma_remove_closed(vma);
} }
void i915_vma_destroy(struct i915_vma *vma) void i915_vma_release(struct kref *ref)
{ {
struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
if (drm_mm_node_allocated(&vma->node)) { if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex); mutex_lock(&vma->vm->mutex);
atomic_and(~I915_VMA_PIN_MASK, &vma->flags); atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
...@@ -1072,7 +1075,7 @@ void i915_vma_parked(struct intel_gt *gt) ...@@ -1072,7 +1075,7 @@ void i915_vma_parked(struct intel_gt *gt)
spin_unlock_irq(&gt->closed_lock); spin_unlock_irq(&gt->closed_lock);
if (obj) { if (obj) {
i915_vma_destroy(vma); __i915_vma_put(vma);
i915_gem_object_put(obj); i915_gem_object_put(obj);
} }
...@@ -1236,7 +1239,7 @@ int __i915_vma_unbind(struct i915_vma *vma) ...@@ -1236,7 +1239,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma); i915_vma_detach(vma);
vma_unbind_pages(vma); vma_unbind_pages(vma);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0; return 0;
} }
......
...@@ -51,14 +51,19 @@ enum i915_cache_level; ...@@ -51,14 +51,19 @@ enum i915_cache_level;
*/ */
struct i915_vma { struct i915_vma {
struct drm_mm_node node; struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm; struct i915_address_space *vm;
const struct i915_vma_ops *ops; const struct i915_vma_ops *ops;
struct i915_fence_reg *fence;
struct drm_i915_gem_object *obj;
struct dma_resv *resv; /** Alias of obj->resv */ struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages; struct sg_table *pages;
void __iomem *iomap; void __iomem *iomap;
void *private; /* owned by creator */ void *private; /* owned by creator */
struct i915_fence_reg *fence;
u64 size; u64 size;
u64 display_alignment; u64 display_alignment;
struct i915_page_sizes page_sizes; struct i915_page_sizes page_sizes;
...@@ -74,6 +79,7 @@ struct i915_vma { ...@@ -74,6 +79,7 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases * handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma. * that exist in the ctx->handle_vmas LUT for this vma.
*/ */
struct kref ref;
atomic_t open_count; atomic_t open_count;
atomic_t flags; atomic_t flags;
/** /**
...@@ -336,7 +342,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma); ...@@ -336,7 +342,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma); void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma); void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma); void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
/*
 * Try to acquire a reference to the vma. Returns the vma on success, or
 * NULL if the refcount has already dropped to zero (i.e. the vma is being
 * released and must not be used). Callers pairing this with __i915_vma_put()
 * use it to keep the vma alive across an unbind racing with i915_vma_parked.
 */
static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
{
	/* kref_get_unless_zero() fails once i915_vma_release() is underway */
	if (kref_get_unless_zero(&vma->ref))
		return vma;
	return NULL;
}
void i915_vma_release(struct kref *ref);
/*
 * Release a reference to the vma; when the last reference is dropped,
 * i915_vma_release() frees the vma.
 */
static inline void __i915_vma_put(struct i915_vma *vma)
{
	kref_put(&vma->ref, i915_vma_release);
}
#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment