Commit a01cb37a authored by Chris Wilson's avatar Chris Wilson

drm/i915: Remove i915_vma_create from VMA API

With the introduction of i915_vma_instance() for obtaining the VMA
singleton for a (obj, vm, view) tuple, we can remove the
i915_vma_create() in favour of a single entry point. We do incur a
lookup onto an empty tree, but i915_vma_create() was being called
infrequently and during initialisation, so the small overhead is
negligible.

v2: Drop the i915_ prefix from the now static vma_create() function
Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: default avatarJoonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170116152131.18089-4-chris@chris-wilson.co.uk
parent 4ea9527c
...@@ -269,7 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, ...@@ -269,7 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
goto err_out; goto err_out;
} }
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
i915_gem_object_put(obj); i915_gem_object_put(obj);
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
......
...@@ -200,7 +200,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine) ...@@ -200,7 +200,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
goto err_free; goto err_free;
} }
so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so->vma)) { if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma); ret = PTR_ERR(so->vma);
goto err_obj; goto err_obj;
......
...@@ -566,7 +566,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) ...@@ -566,7 +566,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj)) if (IS_ERR(obj))
return ERR_CAST(obj); return ERR_CAST(obj);
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err; goto err;
......
...@@ -69,16 +69,14 @@ i915_vma_retire(struct i915_gem_active *active, ...@@ -69,16 +69,14 @@ i915_vma_retire(struct i915_gem_active *active,
} }
static struct i915_vma * static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj, vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm, struct i915_address_space *vm,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct i915_vma *vma; struct i915_vma *vma;
struct rb_node *rb, **p; struct rb_node *rb, **p;
int i; int i;
GEM_BUG_ON(vm->closed);
vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL); vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
if (vma == NULL) if (vma == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -186,31 +184,6 @@ i915_vma_lookup(struct drm_i915_gem_object *obj, ...@@ -186,31 +184,6 @@ i915_vma_lookup(struct drm_i915_gem_object *obj,
return NULL; return NULL;
} }
/**
* i915_vma_create - creates a VMA
* @obj: parent &struct drm_i915_gem_object to be mapped
* @vm: address space in which the mapping is located
* @view: additional mapping requirements
*
* i915_vma_create() allocates a new VMA of the @obj in the @vm with
* @view characteristics.
*
* Must be called with struct_mutex held.
*
* Returns the vma if found, or an error pointer.
*/
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
GEM_BUG_ON(i915_vma_lookup(obj, vm, view));
return __i915_vma_create(obj, vm, view);
}
/** /**
* i915_vma_instance - return the singleton instance of the VMA * i915_vma_instance - return the singleton instance of the VMA
* @obj: parent &struct drm_i915_gem_object to be mapped * @obj: parent &struct drm_i915_gem_object to be mapped
...@@ -239,7 +212,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj, ...@@ -239,7 +212,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
vma = i915_vma_lookup(obj, vm, view); vma = i915_vma_lookup(obj, vm, view);
if (!vma) if (!vma)
vma = i915_vma_create(obj, vm, view); vma = vma_create(obj, vm, view);
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma)); GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
......
...@@ -111,11 +111,6 @@ struct i915_vma { ...@@ -111,11 +111,6 @@ struct i915_vma {
struct drm_i915_gem_exec_object2 *exec_entry; struct drm_i915_gem_exec_object2 *exec_entry;
}; };
struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
struct i915_vma * struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj, i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm, struct i915_address_space *vm,
......
...@@ -264,7 +264,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) ...@@ -264,7 +264,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj); return PTR_ERR(obj);
} }
vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err_unref; goto err_unref;
......
...@@ -1225,7 +1225,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) ...@@ -1225,7 +1225,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto err; goto err;
...@@ -2198,7 +2198,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, ...@@ -2198,7 +2198,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return PTR_ERR(ctx_obj); return PTR_ERR(ctx_obj);
} }
vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL); vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto error_deref_obj; goto error_deref_obj;
......
...@@ -1738,7 +1738,7 @@ static int init_status_page(struct intel_engine_cs *engine) ...@@ -1738,7 +1738,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret) if (ret)
goto err; goto err;
vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err; goto err;
...@@ -1872,7 +1872,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) ...@@ -1872,7 +1872,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
/* mark ring buffers as read-only from GPU side by default */ /* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1; obj->gt_ro = 1;
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err; goto err;
...@@ -2462,7 +2462,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, ...@@ -2462,7 +2462,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
if (IS_ERR(obj)) if (IS_ERR(obj))
goto err; goto err;
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err_obj; goto err_obj;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment