Commit c5cf9a91 authored by Chris Wilson

drm/i915: Create a kmem_cache to allocate struct i915_priolist from

The i915_priolist structs are allocated in an atomic context, on a path where
we wish to minimise latency. If we use a dedicated kmem_cache, we gain a
local freelist from which to service new requests, which should keep the
latency impact of an allocation small. Currently we expect the majority of
requests to be at default priority (and so hit the preallocated priolist),
but once userspace starts using priorities it is likely to use many
fine-grained policies, improving the utilisation of a private slab.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170517121007.27224-9-chris@chris-wilson.co.uk
parent 6c067579
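
For context, the slab API this patch switches to has a simple lifecycle: create one cache per object type at load time, allocate from it (with GFP_ATOMIC here, since the commit message notes the allocation happens in atomic context), free objects back to the cache, and destroy the cache on unload. The sketch below shows that pattern in isolation; the struct and function names (my_priolist, my_cache_init, and so on) are illustrative stand-ins rather than i915 code, and only the KMEM_CACHE()/kmem_cache_*() calls mirror what the diff does.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

/* Illustrative stand-in for struct i915_priolist; not the driver's definition. */
struct my_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

static struct kmem_cache *my_priolist_cache;

static int my_cache_init(void)
{
	/*
	 * KMEM_CACHE() derives the object size and alignment from the struct;
	 * SLAB_HWCACHE_ALIGN keeps objects cacheline-aligned, as in the patch.
	 */
	my_priolist_cache = KMEM_CACHE(my_priolist, SLAB_HWCACHE_ALIGN);
	return my_priolist_cache ? 0 : -ENOMEM;
}

static struct my_priolist *my_priolist_alloc(void)
{
	/*
	 * GFP_ATOMIC: no sleeping, so this is safe in atomic context; the
	 * cache's local freelist keeps the common case cheap.
	 */
	return kmem_cache_alloc(my_priolist_cache, GFP_ATOMIC);
}

static void my_priolist_free(struct my_priolist *p)
{
	kmem_cache_free(my_priolist_cache, p);
}

static void my_cache_fini(void)
{
	/* Every object must have been freed back before the cache is destroyed. */
	kmem_cache_destroy(my_priolist_cache);
}

This is the same shape as the hunks below: KMEM_CACHE() in i915_gem_load_init() and mock_gem_device(), kmem_cache_alloc(..., GFP_ATOMIC) in insert_request(), kmem_cache_free() when a priolist is retired, and kmem_cache_destroy() in the cleanup and error paths.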
@@ -2027,6 +2027,7 @@ struct drm_i915_private {
 	struct kmem_cache *vmas;
 	struct kmem_cache *requests;
 	struct kmem_cache *dependencies;
+	struct kmem_cache *priorities;
 
 	const struct intel_device_info info;
@@ -4866,12 +4866,16 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->dependencies)
 		goto err_requests;
 
+	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+	if (!dev_priv->priorities)
+		goto err_dependencies;
+
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	err = i915_gem_timeline_init__global(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (err)
-		goto err_dependencies;
+		goto err_priorities;
 
 	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4895,6 +4899,8 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 
 	return 0;
 
+err_priorities:
+	kmem_cache_destroy(dev_priv->priorities);
 err_dependencies:
 	kmem_cache_destroy(dev_priv->dependencies);
 err_requests:
@@ -4918,6 +4924,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	kmem_cache_destroy(dev_priv->priorities);
 	kmem_cache_destroy(dev_priv->dependencies);
 	kmem_cache_destroy(dev_priv->requests);
 	kmem_cache_destroy(dev_priv->vmas);
@@ -704,7 +704,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
 		rb_erase(&p->node, &engine->execlist_queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
-			kfree(p);
+			kmem_cache_free(engine->i915->priorities, p);
 	}
 done:
 	engine->execlist_first = rb;
@@ -499,7 +499,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		rb_erase(&p->node, &engine->execlist_queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
-			kfree(p);
+			kmem_cache_free(engine->i915->priorities, p);
 	}
 done:
 	engine->execlist_first = rb;
@@ -661,7 +661,7 @@ insert_request(struct intel_engine_cs *engine,
 	if (prio == I915_PRIORITY_NORMAL) {
 		p = &engine->default_priolist;
 	} else {
-		p = kmalloc(sizeof(*p), GFP_ATOMIC);
+		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */
 		if (unlikely(!p)) {
 			prio = I915_PRIORITY_NORMAL; /* recurses just once */
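
The insert_request() hunk above also shows why a GFP_ATOMIC allocation can afford to fail gracefully: on allocation failure the request is bumped to I915_PRIORITY_NORMAL and reuses the engine's embedded default_priolist, so submission never has to report an error. Below is a stripped-down sketch of that fallback shape; names such as prio_bucket and get_bucket are hypothetical, and the real code retries its lookup loop at normal priority rather than returning the default directly.

#include <linux/list.h>
#include <linux/slab.h>

#define PRIORITY_NORMAL	0	/* illustrative default priority */

struct prio_bucket {
	struct list_head requests;
	int priority;
};

/* Embedded default bucket: always available, never allocated. */
static struct prio_bucket default_bucket = {
	.requests = LIST_HEAD_INIT(default_bucket.requests),
	.priority = PRIORITY_NORMAL,
};

static struct prio_bucket *get_bucket(struct kmem_cache *cache, int prio)
{
	struct prio_bucket *p;

	if (prio == PRIORITY_NORMAL)
		return &default_bucket;

	p = kmem_cache_alloc(cache, GFP_ATOMIC);
	if (!p) {
		/* Convert the allocation failure into a priority bump. */
		return &default_bucket;
	}

	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);
	return p;
}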
@@ -74,6 +74,7 @@ static void mock_device_release(struct drm_device *dev)
 
 	destroy_workqueue(i915->wq);
 
+	kmem_cache_destroy(i915->priorities);
 	kmem_cache_destroy(i915->dependencies);
 	kmem_cache_destroy(i915->requests);
 	kmem_cache_destroy(i915->vmas);
@@ -186,12 +187,16 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->dependencies)
 		goto err_requests;
 
+	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+	if (!i915->priorities)
+		goto err_dependencies;
+
 	mutex_lock(&i915->drm.struct_mutex);
 	INIT_LIST_HEAD(&i915->gt.timelines);
 	err = i915_gem_timeline_init__global(i915);
 	if (err) {
 		mutex_unlock(&i915->drm.struct_mutex);
-		goto err_dependencies;
+		goto err_priorities;
 	}
 
 	mock_init_ggtt(i915);
@@ -211,6 +216,8 @@ struct drm_i915_private *mock_gem_device(void)
 err_engine:
 	for_each_engine(engine, i915, id)
 		mock_engine_free(engine);
+err_priorities:
+	kmem_cache_destroy(i915->priorities);
 err_dependencies:
 	kmem_cache_destroy(i915->dependencies);
 err_requests: