Commit e6e1a304 authored by Maarten Lankhorst, committed by Matthew Auld

drm/i915: vma is always backed by an object.

vma->obj and vma->resv are now never NULL, and some checks can be removed.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211117142024.1043017-4-matthew.auld@intel.com
parent d03a29e0
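
Context for why the deleted NULL checks were dead code: every i915 VMA is created through a single internal constructor that takes the backing GEM object and records it unconditionally, so vma->obj (and with it vma->resv) can never be NULL by the time any of the paths below run. A minimal sketch of the relevant assignments, simplified from the vma_create() of this era in i915_vma.c (error paths, rb-tree insertion, and most fields elided):

	/*
	 * Simplified sketch, not the full function: the only VMA
	 * constructor takes the backing object and stores both
	 * pointers up front, which is the invariant this patch
	 * relies on when removing the NULL checks.
	 */
	static struct i915_vma *
	vma_create(struct drm_i915_gem_object *obj,
		   struct i915_address_space *vm,
		   const struct i915_ggtt_view *view)
	{
		struct i915_vma *vma;

		vma = i915_vma_alloc();
		if (vma == NULL)
			return ERR_PTR(-ENOMEM);

		vma->vm = i915_vm_get(vm);
		vma->ops = &vm->vma_ops;
		vma->obj = obj;			/* never NULL: callers pass a real object */
		vma->resv = obj->base.resv;	/* so the reservation lock always exists too */
		/* ... size, node, obj->vma.tree insertion, etc. elided ... */

		return vma;
	}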
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 	 */
 	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
-	if (!err && ce->ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
 	if (!err && ce->state)
 		err = i915_gem_object_lock(ce->state->obj, ww);
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1357,7 +1357,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
 	if (!err && gen7_wa_vma)
 		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
-	if (!err && engine->legacy.ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
 	if (!err)
 		err = intel_timeline_pin(timeline, &ww);
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -40,12 +40,12 @@
 static struct kmem_cache *slab_vmas;
 
-struct i915_vma *i915_vma_alloc(void)
+static struct i915_vma *i915_vma_alloc(void)
 {
 	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
 }
 
-void i915_vma_free(struct i915_vma *vma)
+static void i915_vma_free(struct i915_vma *vma)
 {
 	return kmem_cache_free(slab_vmas, vma);
 }
@@ -426,10 +426,8 @@ int i915_vma_bind(struct i915_vma *vma,
 		work->base.dma.error = 0; /* enable the queue_work() */
 
-		if (vma->obj) {
-			__i915_gem_object_pin_pages(vma->obj);
-			work->pinned = i915_gem_object_get(vma->obj);
-		}
+		__i915_gem_object_pin_pages(vma->obj);
+		work->pinned = i915_gem_object_get(vma->obj);
 	} else {
 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
 	}
@@ -670,7 +668,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	color = 0;
-	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+	if (i915_vm_has_cache_coloring(vma->vm))
 		color = vma->obj->cache_level;
 
 	if (flags & PIN_OFFSET_FIXED) {
@@ -795,17 +793,14 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
 static int vma_get_pages(struct i915_vma *vma)
 {
 	int err = 0;
-	bool pinned_pages = false;
+	bool pinned_pages = true;
 
 	if (atomic_add_unless(&vma->pages_count, 1, 0))
 		return 0;
 
-	if (vma->obj) {
-		err = i915_gem_object_pin_pages(vma->obj);
-		if (err)
-			return err;
-		pinned_pages = true;
-	}
+	err = i915_gem_object_pin_pages(vma->obj);
+	if (err)
+		return err;
 
 	/* Allocations ahoy! */
 	if (mutex_lock_interruptible(&vma->pages_mutex)) {
@@ -838,8 +833,8 @@ static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
 		vma->ops->clear_pages(vma);
 		GEM_BUG_ON(vma->pages);
-		if (vma->obj)
-			i915_gem_object_unpin_pages(vma->obj);
+
+		i915_gem_object_unpin_pages(vma->obj);
 	}
 	mutex_unlock(&vma->pages_mutex);
 }
@@ -875,7 +870,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	int err;
 
 #ifdef CONFIG_PROVE_LOCKING
-	if (debug_locks && !WARN_ON(!ww) && vma->resv)
+	if (debug_locks && !WARN_ON(!ww))
 		assert_vma_held(vma);
 #endif
@@ -983,7 +978,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	GEM_BUG_ON(!vma->pages);
 	err = i915_vma_bind(vma,
-			    vma->obj ? vma->obj->cache_level : 0,
+			    vma->obj->cache_level,
 			    flags, work);
 	if (err)
 		goto err_remove;
@@ -1037,7 +1032,7 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 
 #ifdef CONFIG_LOCKDEP
-	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
+	WARN_ON(!ww && dma_resv_held(vma->resv));
 #endif
 
 	do {
@@ -1116,6 +1111,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 void i915_vma_release(struct kref *ref)
 {
 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+	struct drm_i915_gem_object *obj = vma->obj;
 
 	if (drm_mm_node_allocated(&vma->node)) {
 		mutex_lock(&vma->vm->mutex);
@@ -1126,15 +1122,11 @@ void i915_vma_release(struct kref *ref)
 	}
 	GEM_BUG_ON(i915_vma_is_active(vma));
 
-	if (vma->obj) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		spin_lock(&obj->vma.lock);
-		list_del(&vma->obj_link);
-		if (!RB_EMPTY_NODE(&vma->obj_node))
-			rb_erase(&vma->obj_node, &obj->vma.tree);
-		spin_unlock(&obj->vma.lock);
-	}
+	spin_lock(&obj->vma.lock);
+	list_del(&vma->obj_link);
+	if (!RB_EMPTY_NODE(&vma->obj_node))
+		rb_erase(&vma->obj_node, &obj->vma.tree);
+	spin_unlock(&obj->vma.lock);
 
 	__i915_vma_remove_closed(vma);
 	i915_vm_put(vma->vm);
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -418,9 +418,6 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma)
 	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
 		for_each_until(!i915_vma_is_ggtt(V))
 
-struct i915_vma *i915_vma_alloc(void);
-void i915_vma_free(struct i915_vma *vma);
-
 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
 void i915_vma_make_shrinkable(struct i915_vma *vma);
 void i915_vma_make_purgeable(struct i915_vma *vma);
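
A side effect of dropping the i915_vma_alloc()/i915_vma_free() prototypes from i915_vma.h (and making them static above) is that VMA construction is now fully encapsulated: outside i915_vma.c, a VMA can only be obtained through i915_vma_instance(), which requires a backing object up front. A hedged usage sketch, not taken from this patch; the caller context and error handling are illustrative only:

	/*
	 * Illustrative-only caller sketch. i915_vma_instance() returns
	 * the (possibly pre-existing) VMA for obj in the given address
	 * space; because obj is a required argument, every VMA handed
	 * back is object-backed, which is the invariant this patch
	 * relies on.
	 */
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* vma->obj == obj and vma->resv == obj->base.resv from birth. */
	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);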