Commit 67a3acaa authored by Chris Wilson

drm/i915: Use a ctor for TYPESAFE_BY_RCU i915_request

As we start peeking into requests for longer and longer, e.g. incorporating
the use of spinlocks when only protected by an rcu_read_lock(), we need to be
careful how we reset the request when recycling it, and we must preserve any
barriers that may still be in use while the request is being reset for reuse.
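
To make that split concrete, here is a minimal sketch of the pattern (with
made-up names such as struct obj and obj_ctor(), not the i915 code itself):
the slab constructor only initialises state that must stay valid across
free/realloc cycles, while each allocation resets the rest by hand.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
        spinlock_t lock;        /* stable: initialised once by the ctor */
        struct list_head link;  /* stable: must be empty when reused */
        refcount_t ref;         /* guards speculative RCU access */
        u32 key;                /* identity, re-checked by RCU walkers */
        int payload;            /* mutable: reset on every allocation */
};

static struct kmem_cache *obj_cache;

/* Runs when a slab page is populated, not on every kmem_cache_alloc() */
static void obj_ctor(void *arg)
{
        struct obj *o = arg;

        spin_lock_init(&o->lock);
        INIT_LIST_HEAD(&o->link);
}

static int __init obj_cache_init(void)
{
        obj_cache = kmem_cache_create("obj", sizeof(struct obj),
                                      __alignof__(struct obj),
                                      SLAB_TYPESAFE_BY_RCU, obj_ctor);
        return obj_cache ? 0 : -ENOMEM;
}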

Quoting Linus Torvalds:

> If there is refcounting going on then why use SLAB_TYPESAFE_BY_RCU?

  .. because the object can be accessed (by RCU) after the refcount has
  gone down to zero, and the thing has been released.

  That's the whole and only point of SLAB_TYPESAFE_BY_RCU.

  That flag basically says:

  "I may end up accessing this object *after* it has been free'd,
  because there may be RCU lookups in flight"

  This has nothing to do with constructors. It's ok if the object gets
  reused as an object of the same type and does *not* get
  re-initialized, because we're perfectly fine seeing old stale data.

  What it guarantees is that the slab isn't shared with any other kind
  of object, _and_ that the underlying pages are free'd after an RCU
  quiescent period (so the pages aren't shared with another kind of
  object either during an RCU walk).

  And it doesn't necessarily have to have a constructor, because the
  thing that a RCU walk will care about is

    (a) guaranteed to be an object that *has* been on some RCU list (so
    it's not a "new" object)

    (b) the RCU walk needs to have logic to verify that it's still the
    *same* object and hasn't been re-used as something else.

  In contrast, a SLAB_TYPESAFE_BY_RCU memory gets free'd and re-used
  immediately, but because it gets reused as the same kind of object,
  the RCU walker can "know" what parts have meaning for re-use, in a way
  it couldn't if the re-use was random.

  That said, it *is* subtle, and people should be careful.
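
To illustrate (a) and (b), a minimal lookup sketch reusing the hypothetical
struct obj from above; obj_table, table_find() and obj_put() are likewise
made-up helpers rather than existing kernel API:

struct obj_table;

static struct obj *obj_lookup(struct obj_table *t, u32 key)
{
        struct obj *o;

        rcu_read_lock();
        o = table_find(t, key);         /* hypothetical RCU-safe walk */
        if (o && !refcount_inc_not_zero(&o->ref))
                o = NULL;               /* caught mid-free: treat as a miss */
        rcu_read_unlock();

        if (o) {
                /* pairs with the allocator's smp_wmb() further below */
                smp_rmb();
                /* (b): the slot may have been recycled for another key */
                if (READ_ONCE(o->key) != key) {
                        obj_put(o);     /* hypothetical: drop the reference */
                        o = NULL;
                }
        }

        return o;
}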

> So the re-use might initialize the fields lazily, not necessarily using a ctor.

  If you have a well-defined refcount, and use "atomic_inc_not_zero()"
  to guard the speculative RCU access section, and use
  "atomic_dec_and_test()" in the freeing section, then you should be
  safe wrt new allocations.

  If you have a completely new allocation that has "random stale
  content", you know that it cannot be on the RCU list, so there is no
  speculative access that can ever see that random content.

  So the only case you need to worry about is a re-use allocation, and
  you know that the refcount will start out as zero even if you don't
  have a constructor.

  So you can think of the refcount itself as always having a zero
  constructor, *BUT* you need to be careful with ordering.

  In particular, whoever does the allocation needs to then set the
  refcount to a non-zero value *after* it has initialized all the other
  fields. And in particular, it needs to make sure that it uses the
  proper memory ordering to do so.

  NOTE! One thing to be very worried about is that re-initializing
  whatever RCU lists means that now the RCU walker may be walking on the
  wrong list so the walker may do the right thing for this particular
  entry, but it may miss walking *other* entries. So then you can get
  spurious lookup failures, because the RCU walker never walked all the
  way to the end of the right list. That ends up being a much more
  subtle bug.
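
The allocation side of that discipline might look like the sketch below
(again the hypothetical obj example; table_insert() is another made-up
helper, not i915 code): every field a concurrent walker may inspect is
written before the refcount becomes non-zero, and an explicit write barrier
orders the two, pairing with the smp_rmb() in the lookup sketch above.

static struct obj *obj_create(struct obj_table *t, u32 key)
{
        struct obj *o;

        o = kmem_cache_alloc(obj_cache, GFP_KERNEL);
        if (!o)
                return NULL;

        /* o->lock and o->link carry over from the slot's previous life */

        /* reset everything the ctor does not cover ... */
        o->payload = 0;
        o->key = key;

        /* ... before the object can pass refcount_inc_not_zero() */
        smp_wmb();
        refcount_set(&o->ref, 1);

        table_insert(t, o);     /* hypothetical: publish for RCU walkers */
        return o;
}
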
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191122094924.629690-1-chris@chris-wilson.co.uk
parent f05bfce3
@@ -188,7 +188,7 @@ static void free_capture_list(struct i915_request *request)
 {
        struct i915_capture_list *capture;

-       capture = request->capture_list;
+       capture = fetch_and_zero(&request->capture_list);
        while (capture) {
                struct i915_capture_list *next = capture->next;

@@ -214,7 +214,7 @@ static void remove_from_engine(struct i915_request *rq)
                spin_lock(&engine->active.lock);
                locked = engine;
        }
-       list_del(&rq->sched.link);
+       list_del_init(&rq->sched.link);
        spin_unlock_irq(&locked->active.lock);
 }

@@ -586,6 +586,21 @@ request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
        return kmem_cache_alloc(global.slab_requests, gfp);
 }

+static void __i915_request_ctor(void *arg)
+{
+       struct i915_request *rq = arg;
+
+       spin_lock_init(&rq->lock);
+       i915_sched_node_init(&rq->sched);
+       i915_sw_fence_init(&rq->submit, submit_notify);
+       i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+       rq->file_priv = NULL;
+       rq->capture_list = NULL;
+
+       INIT_LIST_HEAD(&rq->execute_cb);
+}
+
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
@@ -648,6 +663,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq->engine = ce->engine;
        rq->ring = ce->ring;
        rq->execution_mask = ce->engine->mask;
+       rq->flags = 0;
        rcu_assign_pointer(rq->timeline, tl);
        rq->hwsp_seqno = tl->hwsp_seqno;

@@ -655,23 +671,20 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)

        rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

-       spin_lock_init(&rq->lock);
        dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
                       tl->fence_context, seqno);

        /* We bump the ref for the fence chain */
-       i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
-       i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
+       i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+       i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

-       i915_sched_node_init(&rq->sched);
+       i915_sched_node_reinit(&rq->sched);

-       /* No zalloc, must clear what we need by hand */
-       rq->file_priv = NULL;
+       /* No zalloc, everything must be cleared after use */
        rq->batch = NULL;
-       rq->capture_list = NULL;
-       rq->flags = 0;
-
-       INIT_LIST_HEAD(&rq->execute_cb);
+       GEM_BUG_ON(rq->file_priv);
+       GEM_BUG_ON(rq->capture_list);
+       GEM_BUG_ON(!list_empty(&rq->execute_cb));

        /*
         * Reserve space in the ring buffer for all the commands required to
@@ -1533,10 +1546,14 @@ static struct i915_global_request global = { {

 int __init i915_global_request_init(void)
 {
-       global.slab_requests = KMEM_CACHE(i915_request,
-                                         SLAB_HWCACHE_ALIGN |
-                                         SLAB_RECLAIM_ACCOUNT |
-                                         SLAB_TYPESAFE_BY_RCU);
+       global.slab_requests =
+               kmem_cache_create("i915_request",
+                                 sizeof(struct i915_request),
+                                 __alignof__(struct i915_request),
+                                 SLAB_HWCACHE_ALIGN |
+                                 SLAB_RECLAIM_ACCOUNT |
+                                 SLAB_TYPESAFE_BY_RCU,
+                                 __i915_request_ctor);
        if (!global.slab_requests)
                return -ENOMEM;
......
@@ -387,9 +387,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
+
+       i915_sched_node_reinit(node);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;
+
+       GEM_BUG_ON(!list_empty(&node->signalers_list));
+       GEM_BUG_ON(!list_empty(&node->waiters_list));
+       GEM_BUG_ON(!list_empty(&node->link));
 }

 static struct i915_dependency *
@@ -481,6 +491,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }
+       INIT_LIST_HEAD(&node->signalers_list);

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -491,6 +502,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }
+       INIT_LIST_HEAD(&node->waiters_list);

        spin_unlock_irq(&schedule_lock);
 }
......
@@ -26,6 +26,7 @@
                                         sched.link)

 void i915_sched_node_init(struct i915_sched_node *node);
+void i915_sched_node_reinit(struct i915_sched_node *node);

 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
......
@@ -12,6 +12,12 @@
 #include "i915_sw_fence.h"
 #include "i915_selftest.h"

+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
+#else
+#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
 #define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */

 static DEFINE_SPINLOCK(i915_sw_fence_lock);

@@ -218,13 +224,21 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
 {
        BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);

+       __init_waitqueue_head(&fence->wait, name, key);
+       fence->flags = (unsigned long)fn;
+
+       i915_sw_fence_reinit(fence);
+}
+
+void i915_sw_fence_reinit(struct i915_sw_fence *fence)
+{
        debug_fence_init(fence);

-       __init_waitqueue_head(&fence->wait, name, key);
        atomic_set(&fence->pending, 1);
        fence->error = 0;

-       fence->flags = (unsigned long)fn;
+       I915_SW_FENCE_BUG_ON(!fence->flags);
+       I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
 }

 void i915_sw_fence_commit(struct i915_sw_fence *fence)
......
@@ -54,6 +54,8 @@ do {                                                           \
        __i915_sw_fence_init((fence), (fn), NULL, NULL)
 #endif

+void i915_sw_fence_reinit(struct i915_sw_fence *fence);
+
 #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
 void i915_sw_fence_fini(struct i915_sw_fence *fence);
 #else
......