Commit 274cb1ca authored by Nathan Lynch, committed by Michael Ellerman

powerpc/pseries/mobility: handle premature return from H_JOIN

The pseries join/suspend sequence in its current form was written with
the assumption that it was the only user of H_PROD and that it needn't
handle spurious successful returns from H_JOIN. That's wrong;
powerpc's paravirt spinlock code uses H_PROD, and CPUs entering
do_join() can be woken prematurely from H_JOIN with a status of
H_SUCCESS as a result. This causes all CPUs to exit the sequence
early, preventing suspend from occurring at all.

Add a 'done' boolean flag to the pseries_suspend_info struct, and have
the waking thread set it before waking the other threads. Threads
which receive H_SUCCESS from H_JOIN retry if the 'done' flag is still
unset.

Fixes: 9327dc0a ("powerpc/pseries/mobility: use stop_machine for join/suspend")
Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210315080045.460331-3-nathanl@linux.ibm.com
parent e834df6c
@@ -458,9 +458,12 @@ static int do_suspend(void)
  * or if an error is received from H_JOIN. The thread which performs
  * the first increment (i.e. sets it to 1) is responsible for
  * waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
  */
 struct pseries_suspend_info {
 	atomic_t counter;
+	bool done;
 };

 static int do_join(void *arg)
@@ -470,6 +473,7 @@ static int do_join(void *arg)
 	long hvrc;
 	int ret;

+retry:
 	/* Must ensure MSR.EE off for H_JOIN. */
 	hard_irq_disable();
 	hvrc = plpar_hcall_norets(H_JOIN);
@@ -485,8 +489,20 @@ static int do_join(void *arg)
 	case H_SUCCESS:
 		/*
 		 * The suspend is complete and this cpu has received a
-		 * prod.
+		 * prod, or we've received a stray prod from unrelated
+		 * code (e.g. paravirt spinlocks) and we need to join
+		 * again.
+		 *
+		 * This barrier orders the return from H_JOIN above vs
+		 * the load of info->done. It pairs with the barrier
+		 * in the wakeup/prod path below.
 		 */
+		smp_mb();
+		if (READ_ONCE(info->done) == false) {
+			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+					    smp_processor_id());
+			goto retry;
+		}
 		ret = 0;
 		break;
 	case H_BAD_MODE:
@@ -500,6 +516,13 @@ static int do_join(void *arg)
 	if (atomic_inc_return(counter) == 1) {
 		pr_info("CPU %u waking all threads\n", smp_processor_id());
+		WRITE_ONCE(info->done, true);
+		/*
+		 * This barrier orders the store to info->done vs subsequent
+		 * H_PRODs to wake the other CPUs. It pairs with the barrier
+		 * in the H_SUCCESS case above.
+		 */
+		smp_mb();
 		prod_others();
 	}
 	/*
@@ -553,6 +576,7 @@ static int pseries_suspend(u64 handle)
 	info = (struct pseries_suspend_info) {
 		.counter = ATOMIC_INIT(0),
+		.done = false,
 	};

 	ret = stop_machine(do_join, &info, cpu_online_mask);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment