Commit dbd6ef29 authored by Chris Wilson's avatar Chris Wilson

drm/i915: Use RCU to annotate and enforce protection for breadcrumb's bh

The bottom-half we use for processing the breadcrumb interrupt is a
task, which is an RCU protected struct. When accessing this struct, we
need to be holding the RCU read lock to prevent it disappearing beneath
us. We can use the RCU annotation to mark our irq_seqno_bh pointer as
being under RCU guard, and then use the RCU accessors to provide
correct ordering of access through the pointer.

Most notably, this fixes the access from hard irq context to use the RCU
read lock, which both Daniel and Tvrtko complained about.
Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: default avatarDaniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470761272-1245-3-git-send-email-chris@chris-wilson.co.uk
parent 83348ba8
...@@ -3849,7 +3849,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) ...@@ -3849,7 +3849,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
* is woken. * is woken.
*/ */
if (engine->irq_seqno_barrier && if (engine->irq_seqno_barrier &&
READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current && rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
struct task_struct *tsk; struct task_struct *tsk;
...@@ -3874,7 +3874,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) ...@@ -3874,7 +3874,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
* irq_posted == false but we are still running). * irq_posted == false but we are still running).
*/ */
rcu_read_lock(); rcu_read_lock();
tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
if (tsk && tsk != current) if (tsk && tsk != current)
/* Note that if the bottom-half is changed as we /* Note that if the bottom-half is changed as we
* are sending the wake-up, the new bottom-half will * are sending the wake-up, the new bottom-half will
......
...@@ -71,10 +71,8 @@ static void intel_breadcrumbs_fake_irq(unsigned long data) ...@@ -71,10 +71,8 @@ static void intel_breadcrumbs_fake_irq(unsigned long data)
* every jiffie in order to kick the oldest waiter to do the * every jiffie in order to kick the oldest waiter to do the
* coherent seqno check. * coherent seqno check.
*/ */
rcu_read_lock();
if (intel_engine_wakeup(engine)) if (intel_engine_wakeup(engine))
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
rcu_read_unlock();
} }
static void irq_enable(struct intel_engine_cs *engine) static void irq_enable(struct intel_engine_cs *engine)
...@@ -234,7 +232,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, ...@@ -234,7 +232,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
} }
rb_link_node(&wait->node, parent, p); rb_link_node(&wait->node, parent, p);
rb_insert_color(&wait->node, &b->waiters); rb_insert_color(&wait->node, &b->waiters);
GEM_BUG_ON(!first && !b->irq_seqno_bh); GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
if (completed) { if (completed) {
struct rb_node *next = rb_next(completed); struct rb_node *next = rb_next(completed);
...@@ -244,7 +242,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, ...@@ -244,7 +242,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(first); GEM_BUG_ON(first);
b->timeout = wait_timeout(); b->timeout = wait_timeout();
b->first_wait = to_wait(next); b->first_wait = to_wait(next);
smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
/* As there is a delay between reading the current /* As there is a delay between reading the current
* seqno, processing the completed tasks and selecting * seqno, processing the completed tasks and selecting
* the next waiter, we may have missed the interrupt * the next waiter, we may have missed the interrupt
...@@ -271,7 +269,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, ...@@ -271,7 +269,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(rb_first(&b->waiters) != &wait->node); GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
b->timeout = wait_timeout(); b->timeout = wait_timeout();
b->first_wait = wait; b->first_wait = wait;
smp_store_mb(b->irq_seqno_bh, wait->tsk); rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
/* After assigning ourselves as the new bottom-half, we must /* After assigning ourselves as the new bottom-half, we must
* perform a cursory check to prevent a missed interrupt. * perform a cursory check to prevent a missed interrupt.
* Either we miss the interrupt whilst programming the hardware, * Either we miss the interrupt whilst programming the hardware,
...@@ -282,7 +280,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, ...@@ -282,7 +280,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
*/ */
__intel_breadcrumbs_enable_irq(b); __intel_breadcrumbs_enable_irq(b);
} }
GEM_BUG_ON(!b->irq_seqno_bh); GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
GEM_BUG_ON(!b->first_wait); GEM_BUG_ON(!b->first_wait);
GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node); GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
...@@ -337,7 +335,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, ...@@ -337,7 +335,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
const int priority = wakeup_priority(b, wait->tsk); const int priority = wakeup_priority(b, wait->tsk);
struct rb_node *next; struct rb_node *next;
GEM_BUG_ON(b->irq_seqno_bh != wait->tsk); GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
/* We are the current bottom-half. Find the next candidate, /* We are the current bottom-half. Find the next candidate,
* the first waiter in the queue on the remaining oldest * the first waiter in the queue on the remaining oldest
...@@ -381,13 +379,13 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, ...@@ -381,13 +379,13 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
*/ */
b->timeout = wait_timeout(); b->timeout = wait_timeout();
b->first_wait = to_wait(next); b->first_wait = to_wait(next);
smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
if (b->first_wait->seqno != wait->seqno) if (b->first_wait->seqno != wait->seqno)
__intel_breadcrumbs_enable_irq(b); __intel_breadcrumbs_enable_irq(b);
wake_up_process(b->irq_seqno_bh); wake_up_process(b->first_wait->tsk);
} else { } else {
b->first_wait = NULL; b->first_wait = NULL;
WRITE_ONCE(b->irq_seqno_bh, NULL); rcu_assign_pointer(b->irq_seqno_bh, NULL);
__intel_breadcrumbs_disable_irq(b); __intel_breadcrumbs_disable_irq(b);
} }
} else { } else {
...@@ -401,7 +399,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, ...@@ -401,7 +399,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(b->first_wait == wait); GEM_BUG_ON(b->first_wait == wait);
GEM_BUG_ON(rb_first(&b->waiters) != GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL)); (b->first_wait ? &b->first_wait->node : NULL));
GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters)); GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
spin_unlock(&b->lock); spin_unlock(&b->lock);
} }
...@@ -598,11 +596,9 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915) ...@@ -598,11 +596,9 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
* RCU lock, i.e. as we call wake_up_process() we must be holding the * RCU lock, i.e. as we call wake_up_process() we must be holding the
* rcu_read_lock(). * rcu_read_lock().
*/ */
rcu_read_lock();
for_each_engine(engine, i915) for_each_engine(engine, i915)
if (unlikely(intel_engine_wakeup(engine))) if (unlikely(intel_engine_wakeup(engine)))
mask |= intel_engine_flag(engine); mask |= intel_engine_flag(engine);
rcu_read_unlock();
return mask; return mask;
} }
......
...@@ -2410,9 +2410,7 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno) ...@@ -2410,9 +2410,7 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
/* After manually advancing the seqno, fake the interrupt in case /* After manually advancing the seqno, fake the interrupt in case
* there are any waiters for that seqno. * there are any waiters for that seqno.
*/ */
rcu_read_lock();
intel_engine_wakeup(engine); intel_engine_wakeup(engine);
rcu_read_unlock();
} }
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
......
...@@ -171,7 +171,7 @@ struct intel_engine_cs { ...@@ -171,7 +171,7 @@ struct intel_engine_cs {
* the overhead of waking that client is much preferred. * the overhead of waking that client is much preferred.
*/ */
struct intel_breadcrumbs { struct intel_breadcrumbs {
struct task_struct *irq_seqno_bh; /* bh for user interrupts */ struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted; bool irq_posted;
spinlock_t lock; /* protects the lists of requests */ spinlock_t lock; /* protects the lists of requests */
...@@ -539,25 +539,32 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, ...@@ -539,25 +539,32 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait); struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request); void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine) static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{ {
return READ_ONCE(engine->breadcrumbs.irq_seqno_bh); return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
} }
static inline bool intel_engine_wakeup(struct intel_engine_cs *engine) static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{ {
bool wakeup = false; bool wakeup = false;
struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
/* Note that for this not to dangerously chase a dangling pointer, /* Note that for this not to dangerously chase a dangling pointer,
* the caller is responsible for ensure that the task remain valid for * we must hold the rcu_read_lock here.
* wake_up_process() i.e. that the RCU grace period cannot expire.
* *
* Also note that tsk is likely to be in !TASK_RUNNING state so an * Also note that tsk is likely to be in !TASK_RUNNING state so an
* early test for tsk->state != TASK_RUNNING before wake_up_process() * early test for tsk->state != TASK_RUNNING before wake_up_process()
* is unlikely to be beneficial. * is unlikely to be beneficial.
*/ */
if (intel_engine_has_waiter(engine)) {
struct task_struct *tsk;
rcu_read_lock();
tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
if (tsk) if (tsk)
wakeup = wake_up_process(tsk); wakeup = wake_up_process(tsk);
rcu_read_unlock();
}
return wakeup; return wakeup;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment