Commit 9c2ef23e authored by Linus Torvalds

Merge tag 'powerpc-5.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Fix a bug on pseries where spurious wakeups from H_PROD would prevent
  partition migration from succeeding.

   Fix oopses seen in pcpu_alloc(), caused by parallel faults of the
   percpu mapping corrupting the protection key used for the mapping
   and causing a fatal key fault.

  Thanks to Aneesh Kumar K.V, Murilo Opsfelder Araujo, and Nathan Lynch"

* tag 'powerpc-5.12-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm/book3s64: Use the correct storage key value when calling H_PROTECT
  powerpc/pseries/mobility: handle premature return from H_JOIN
  powerpc/pseries/mobility: use struct for shared state
parents fa161995 53f1d317
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
-	flags = (newpp & 7) | H_AVPN;
+	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
 		/* Move pp0 into bit 8 (IBM 55) */
 		flags |= (newpp & HPTE_R_PP0) >> 55;
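For context on the hunk above, here is a small standalone sketch of the storage-key bit handling (my own illustration, not part of the commit; the HPTE_R_* values mirror the kernel's book3s64 MMU header, and the example key of 22 is arbitrary). It shows why masking newpp with 7 drops the key entirely, and where the fixed code places the key bits in the flags word passed to H_PROTECT.

/* Standalone illustration only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define HPTE_R_PP	0x0000000000000003ULL
#define HPTE_R_N	0x0000000000000004ULL
#define HPTE_R_KEY_LO	0x0000000000000e00ULL
#define HPTE_R_KEY_HI	0x3000000000000000ULL

int main(void)
{
	/* newpp carrying PP=2 plus a 5-bit storage key of 22 (0b10110) */
	uint64_t key = 22;
	uint64_t newpp = 0x2 | ((key & 0x7) << 9) | ((key >> 3) << 60);
	uint64_t flags;

	/* Before the fix: only the low PP/N bits survive, the key is dropped. */
	flags = newpp & 7;
	printf("old flags: 0x%llx\n", (unsigned long long)flags);

	/* After the fix: KEY_LO stays in place, KEY_HI folds down to bits 13:12. */
	flags = newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
	printf("new flags: 0x%llx\n", (unsigned long long)flags);

	return 0;
}

Run, it prints old flags 0x2 and new flags 0x2c02: the old call handed the hypervisor a flags word with storage key 0, consistent with the corrupted protection key described in the pull message. (The sketch omits H_AVPN, which is unrelated to the key bits.)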
@@ -452,12 +452,28 @@ static int do_suspend(void)
 	return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+	atomic_t counter;
+	bool done;
+};
+
 static int do_join(void *arg)
 {
-	atomic_t *counter = arg;
+	struct pseries_suspend_info *info = arg;
+	atomic_t *counter = &info->counter;
 	long hvrc;
 	int ret;
 
+retry:
 	/* Must ensure MSR.EE off for H_JOIN. */
 	hard_irq_disable();
 	hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
 	case H_SUCCESS:
 		/*
 		 * The suspend is complete and this cpu has received a
-		 * prod.
+		 * prod, or we've received a stray prod from unrelated
+		 * code (e.g. paravirt spinlocks) and we need to join
+		 * again.
+		 *
+		 * This barrier orders the return from H_JOIN above vs
+		 * the load of info->done. It pairs with the barrier
+		 * in the wakeup/prod path below.
 		 */
+		smp_mb();
+		if (READ_ONCE(info->done) == false) {
+			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+					    smp_processor_id());
+			goto retry;
+		}
 		ret = 0;
 		break;
 	case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
 
 	if (atomic_inc_return(counter) == 1) {
 		pr_info("CPU %u waking all threads\n", smp_processor_id());
+		WRITE_ONCE(info->done, true);
+		/*
+		 * This barrier orders the store to info->done vs subsequent
+		 * H_PRODs to wake the other CPUs. It pairs with the barrier
+		 * in the H_SUCCESS case above.
+		 */
+		smp_mb();
 		prod_others();
 	}
 	/*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
 	int ret;
 
 	while (true) {
-		atomic_t counter = ATOMIC_INIT(0);
+		struct pseries_suspend_info info;
 		unsigned long vasi_state;
 		int vasi_err;
 
-		ret = stop_machine(do_join, &counter, cpu_online_mask);
+		info = (struct pseries_suspend_info) {
+			.counter = ATOMIC_INIT(0),
+			.done = false,
+		};
+
+		ret = stop_machine(do_join, &info, cpu_online_mask);
 		if (ret == 0)
 			break;
 		/*
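The mobility change above boils down to a wakeup protocol: a CPU sleeping in H_JOIN may be prodded for unrelated reasons, so it must re-check a done flag, ordered by a full barrier that pairs with the waker's barrier, and join again if the operation is still in progress. As a rough userspace analogue, here is a minimal sketch using pthreads and C11 atomics as stand-ins for H_JOIN/H_PROD and smp_mb(); everything in it is my own illustration, and none of the names come from the kernel code.

/* Standalone illustration only -- not kernel code. Build with: cc -pthread */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static sem_t prod;              /* stand-in for H_PROD waking an H_JOIN sleeper */
static atomic_bool done;        /* stand-in for info->done (zero-initialized: false) */

static void *joiner(void *unused)
{
	(void)unused;
retry:
	sem_wait(&prod);                                /* "H_JOIN": sleep until prodded */
	atomic_thread_fence(memory_order_seq_cst);      /* pairs with the waker's fence  */
	if (!atomic_load_explicit(&done, memory_order_relaxed)) {
		puts("premature wakeup, joining again");
		goto retry;
	}
	puts("woken for real, operation complete");
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&prod, 0, 0);
	pthread_create(&t, NULL, joiner, NULL);

	sem_post(&prod);                                /* a stray prod from unrelated code */
	sleep(1);                                       /* let the joiner see it and retry  */

	atomic_store_explicit(&done, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);      /* pairs with the joiner's fence */
	sem_post(&prod);                                /* the real wakeup */

	pthread_join(t, NULL);
	sem_destroy(&prod);
	return 0;
}

The first (stray) post makes the joiner print "premature wakeup, joining again" and go back to sleep; only the post issued after the done flag is set lets it finish, which is the behaviour the retry loop in do_join() restores.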