Commit 1ed7bf5f authored by Jeremy Fitzhardinge, committed by H. Peter Anvin

xen, pvticketlock: Allow interrupts to be enabled while blocking

If interrupts were enabled when taking the spinlock, we can leave them
enabled while blocking to get the lock.

If we can enable interrupts while waiting for the lock to become
available, and we take an interrupt before entering the poll,
and the handler takes a spinlock which ends up going into
the slow state (invalidating the per-cpu "lock" and "want" values),
then when the interrupt handler returns the event channel will
remain pending so the poll will return immediately, causing it to
return out to the main spinlock loop.
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-12-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 96f853ea
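
To make the scenario above concrete, here is a minimal userspace sketch of the waiter-side protocol this patch establishes. It is an illustrative model, not the kernel code: ticket_t, struct lock_waiting, and the *_stub functions are hypothetical stand-ins for __ticket_t, the per-cpu xen_lock_waiting slot, smp_wmb(), local_irq_save()/local_irq_restore(), and xen_poll_irq(); only the store ordering and the point where interrupts are re-enabled are meant to mirror the diff below.

/* Hypothetical userspace model -- not the kernel implementation. */
#include <stddef.h>
#include <stdint.h>

typedef uint16_t ticket_t;                     /* stand-in for __ticket_t */

struct lock_waiting {
        void    *lock;                         /* lock this CPU blocks on */
        ticket_t want;                         /* ticket it is waiting for */
};

static struct lock_waiting this_cpu_waiting;   /* per-CPU slot in the kernel */

static void wmb_stub(void)         { __sync_synchronize(); } /* smp_wmb() */
static void irq_save_stub(void)    { }         /* local_irq_save() */
static void irq_restore_stub(void) { }         /* local_irq_restore() */
static void poll_irq_stub(void)    { }         /* xen_poll_irq(): returns at
                                                  once if the event is pending */

static void lock_spinning_model(void *lock, ticket_t want)
{
        struct lock_waiting *w = &this_cpu_waiting;

        irq_save_stub();

        /*
         * Publish (lock, want) so the unlocker can find us.  "lock" may
         * only be non-NULL while "want" is correct, so clear it before
         * updating "want"; a nested slowpath entry from an interrupt
         * simply overwrites the pair and leaves the wakeup event pending.
         */
        w->lock = NULL;
        wmb_stub();
        w->want = want;
        wmb_stub();
        w->lock = lock;

        /*
         * Allow interrupts while blocked: if one fires here and goes
         * through this path itself, the poll below returns immediately
         * and the outer ticket-spin loop retries.
         */
        irq_restore_stub();
        poll_irq_stub();
        irq_save_stub();

        w->lock = NULL;                        /* done waiting */
        irq_restore_stub();
}

int main(void)
{
        int dummy_lock;
        lock_spinning_model(&dummy_lock, 1);
        return 0;
}
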
@@ -142,7 +142,20 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
          * partially setup state.
          */
         local_irq_save(flags);
-
+        /*
+         * We don't really care if we're overwriting some other
+         * (lock,want) pair, as that would mean that we're currently
+         * in an interrupt context, and the outer context had
+         * interrupts enabled. That has already kicked the VCPU out
+         * of xen_poll_irq(), so it will just return spuriously and
+         * retry with newly setup (lock,want).
+         *
+         * The ordering protocol on this is that the "lock" pointer
+         * may only be set non-NULL if the "want" ticket is correct.
+         * If we're updating "want", we must first clear "lock".
+         */
+        w->lock = NULL;
+        smp_wmb();
         w->want = want;
         smp_wmb();
         w->lock = lock;
@@ -157,24 +170,43 @@ static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         /* Only check lock once pending cleared */
         barrier();
 
-        /* Mark entry to slowpath before doing the pickup test to make
-           sure we don't deadlock with an unlocker. */
+        /*
+         * Mark entry to slowpath before doing the pickup test to make
+         * sure we don't deadlock with an unlocker.
+         */
         __ticket_enter_slowpath(lock);
 
-        /* check again make sure it didn't become free while
-           we weren't looking */
+        /*
+         * check again make sure it didn't become free while
+         * we weren't looking
+         */
         if (ACCESS_ONCE(lock->tickets.head) == want) {
                 add_stats(TAKEN_SLOW_PICKUP, 1);
                 goto out;
         }
+
+        /* Allow interrupts while blocked */
+        local_irq_restore(flags);
+
+        /*
+         * If an interrupt happens here, it will leave the wakeup irq
+         * pending, which will cause xen_poll_irq() to return
+         * immediately.
+         */
+
         /* Block until irq becomes pending (or perhaps a spurious wakeup) */
         xen_poll_irq(irq);
         add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
+
+        local_irq_save(flags);
+
         kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 out:
         cpumask_clear_cpu(cpu, &waiting_cpus);
         w->lock = NULL;
+
         local_irq_restore(flags);
+
         spin_time_accum_blocked(start);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
@@ -188,7 +220,9 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
         for_each_cpu(cpu, &waiting_cpus) {
                 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
-                if (w->lock == lock && w->want == next) {
+                /* Make sure we read lock before want */
+                if (ACCESS_ONCE(w->lock) == lock &&
+                    ACCESS_ONCE(w->want) == next) {
                         add_stats(RELEASED_SLOW_KICKED, 1);
                         xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                         break;
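
On the unlock side, the pairing reads happen in the opposite order, "lock" before "want". A hedged sketch of that check, reusing struct lock_waiting and ticket_t from the model above; READ_ONCE_MODEL is a hypothetical stand-in for the kernel's ACCESS_ONCE():

/* Volatile reads as a stand-in for ACCESS_ONCE(); the compiler may not
   reorder them with respect to each other, which preserves the
   lock-before-want read order the patch comments on. */
#define READ_ONCE_MODEL(x) (*(const volatile __typeof__(x) *)&(x))

/* Return nonzero if this waiter is blocked on "lock" waiting for ticket
   "next" and should therefore be kicked. */
static int should_kick_model(struct lock_waiting *w, void *lock, ticket_t next)
{
        return READ_ONCE_MODEL(w->lock) == lock &&
               READ_ONCE_MODEL(w->want) == next;
}
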