Commit fa1827d7 authored by Linus Torvalds

Merge tag 'powerpc-5.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "One fix for a regression introduced by our 32-bit KASAN support, which
  broke booting on machines with "bootx" early debugging enabled.

  A fix for a bug which broke kexec on 32-bit, introduced by changes to
  the 32-bit STRICT_KERNEL_RWX support in v5.1.

  Finally two fixes going to stable for our THP split/collapse handling,
  discovered by Nick. The first fixes random crashes and/or corruption
  in guests under sufficient load.

  Thanks to: Nicholas Piggin, Christophe Leroy, Aaro Koskinen, Mathieu
  Malaterre"

* tag 'powerpc-5.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/32s: fix booting with CONFIG_PPC_EARLY_DEBUG_BOOTX
  powerpc/64s: __find_linux_pte() synchronization vs pmdp_invalidate()
  powerpc/64s: Fix THP PMD collapse serialisation
  powerpc: Fix kexec failure on book3s/32
parents 6a71398c c21f5a9e
@@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
 	return false;
 }
 
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+	/*
+	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+	 *
+	 * This condition may also occur when flushing a pmd while flushing
+	 * it (see ptep_modify_prot_start), so callers must ensure this
+	 * case is fine as well.
+	 */
+	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+	    cpu_to_be64(_PAGE_INVALID))
+		return true;
+	return false;
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
 	if (radix_enabled())
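For readers less familiar with the book3s/64 encoding, the three pmd states involved here can be sketched in plain user-space C. The bit values below are made-up stand-ins (the real ones live in the book3s/64 pgtable headers); only the relationship between the predicates is meant to mirror the logic above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real layout differs. */
#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_INVALID  (1ULL << 1)

typedef struct { uint64_t val; } pmd_t;

/* "Present" covers both a normal entry and one marked invalid for a split. */
static bool pmd_present(pmd_t pmd)
{
	return pmd.val & (_PAGE_PRESENT | _PAGE_INVALID);
}

/* Serializing: split in progress, PRESENT clear but INVALID set. */
static bool pmd_is_serializing(pmd_t pmd)
{
	return (pmd.val & (_PAGE_PRESENT | _PAGE_INVALID)) == _PAGE_INVALID;
}

int main(void)
{
	pmd_t normal    = { _PAGE_PRESENT };
	pmd_t splitting = { _PAGE_INVALID };	/* what pmdp_invalidate leaves behind */
	pmd_t none      = { 0 };

	printf("normal:    present=%d serializing=%d\n",
	       pmd_present(normal), pmd_is_serializing(normal));
	printf("splitting: present=%d serializing=%d\n",
	       pmd_present(splitting), pmd_is_serializing(splitting));
	printf("none:      present=%d serializing=%d\n",
	       pmd_present(none), pmd_is_serializing(none));
	return 0;
}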
@@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 {
+	/*
+	 * pmdp_invalidate sets this combination (which is not caught by
+	 * !pte_present() check in pte_access_permitted), to prevent
+	 * lock-free lookups, as part of the serialize_against_pte_lookup()
+	 * synchronisation.
+	 *
+	 * This also catches the case where the PTE's hardware PRESENT bit is
+	 * cleared while TLB is flushed, which is suboptimal but should not
+	 * be frequent.
+	 */
+	if (pmd_is_serializing(pmd))
+		return false;
+
 	return pte_access_permitted(pmd_pte(pmd), write);
 }
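The comment's point, that a pte_present()-style check alone would not reject a splitting pmd because _PAGE_INVALID still counts as "present", can be demonstrated with the same kind of user-space model. Again the bit values and helpers are illustrative stand-ins, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_INVALID (1ULL << 1)
#define _PAGE_READ    (1ULL << 2)

typedef struct { uint64_t val; } pmd_t;

/* A "pte_present()"-style gate: INVALID still counts as present, so on
 * its own it would NOT reject a pmd that is in the middle of a split. */
static bool pte_access_permitted(uint64_t pte, bool write)
{
	(void)write;
	return (pte & (_PAGE_PRESENT | _PAGE_INVALID)) && (pte & _PAGE_READ);
}

static bool pmd_is_serializing(pmd_t pmd)
{
	return (pmd.val & (_PAGE_PRESENT | _PAGE_INVALID)) == _PAGE_INVALID;
}

static bool pmd_access_permitted(pmd_t pmd, bool write)
{
	if (pmd_is_serializing(pmd))	/* the extra check: refuse lock-free access */
		return false;
	return pte_access_permitted(pmd.val, write);
}

int main(void)
{
	pmd_t splitting = { _PAGE_INVALID | _PAGE_READ };

	printf("without the check: %d\n", pte_access_permitted(splitting.val, false));
	printf("with the check:    %d\n", pmd_access_permitted(splitting, false));
	return 0;
}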
...
@@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
 				 int depth, int pitch);
 extern void btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address);
+#ifdef CONFIG_PPC32
 extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
 extern void btext_map(void);
 extern void btext_unmap(void);
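This is the usual config-stub idiom: the real prototype is only visible on CONFIG_PPC32, and every other configuration gets an empty static inline, so callers (prom_init() later in this series) need no #ifdef of their own. A generic, stand-alone illustration of the pattern, with made-up names:

#include <stdio.h>

/* Toggle this to mimic the config option being set or not. */
#define CONFIG_FOO 1

#ifdef CONFIG_FOO
/* Real implementation, only built on the configs that have the feature. */
static void foo_prepare(void) { puts("preparing foo"); }
#else
/* Empty stub: callers still compile and the optimizer drops the call. */
static inline void foo_prepare(void) { }
#endif

int main(void)
{
	foo_prepare();	/* no #ifdef needed at the call site */
	return 0;
}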
...
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
 	return crashing_cpu >= 0;
 }
 
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+			 unsigned long start_address) __noreturn;
+
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_elf64_ops;
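Giving the assembly routine a real C prototype (replacing the old byte-array declaration removed below) is what makes a direct call possible from default_machine_kexec(), and __noreturn tells the compiler control never comes back. A minimal stand-alone illustration of the same idea; exit_to_firmware is an invented name, and the C body only exists so the sketch links and runs:

#include <stdio.h>
#include <stdlib.h>

/* In the kernel the prototype would sit in a header and the body would be
 * assembly; a plain C body stands in here. */
__attribute__((noreturn)) void exit_to_firmware(unsigned long code);

void exit_to_firmware(unsigned long code)
{
	printf("handing control over, code=%lu\n", code);
	exit(0);	/* never returns, which is what the attribute promises */
}

int main(void)
{
	exit_to_firmware(42);
	/* Unreachable: the compiler knows this thanks to the attribute, so it
	 * does not warn about falling off the end of main(). */
}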
...
...@@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)( ...@@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
*/ */
void default_machine_kexec(struct kimage *image) void default_machine_kexec(struct kimage *image)
{ {
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size; extern const unsigned int relocate_new_kernel_size;
unsigned long page_list; unsigned long page_list;
unsigned long reboot_code_buffer, reboot_code_buffer_phys; unsigned long reboot_code_buffer, reboot_code_buffer_phys;
...@@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image) ...@@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n"); printk(KERN_INFO "Bye!\n");
if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
/* now call it */ /* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer; rnk = (relocate_new_kernel_t) reboot_code_buffer;
(*rnk)(page_list, reboot_code_buffer_phys, image->start); (*rnk)(page_list, reboot_code_buffer_phys, image->start);
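On book3s/32 the control page holding the copied relocation code is no longer mapped executable (the regression from the 32-bit STRICT_KERNEL_RWX changes mentioned in the pull message), so the fix calls relocate_new_kernel() at its linked address instead, except when CONFIG_FSL_BOOKE or CONFIG_44x is set, where running from the copy is still required; since the routine is __noreturn, the old jump through the buffer only executes on those platforms. A rough user-space approximation of the IS_ENABLED() pattern used here (the real kconfig.h macro is more elaborate, and the helper names are invented):

#include <stdio.h>
#include <stdlib.h>

/* Crude stand-in for the kernel's IS_ENABLED(): kconfig.h achieves the same
 * effect with macro tricks that also handle undefined options. */
#define CONFIG_FSL_BOOKE 0
#define CONFIG_44x       0
#define IS_ENABLED(option) (option)

/* Invented helpers standing in for the two ways of reaching the relocation code. */
static void call_relocate_directly(void)
{
	puts("calling relocate_new_kernel() at its linked address");
	exit(0);	/* the real routine is __noreturn */
}

static void jump_to_copied_buffer(void)
{
	puts("jumping into the copy in the reboot code buffer");
	exit(0);
}

int main(void)
{
	/* Both branches are compiled and type-checked; the untaken one is
	 * simply dead code after constant folding. */
	if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
		call_relocate_directly();

	jump_to_copied_buffer();	/* only reached on FSL BookE / 44x */
}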
...
@@ -2336,6 +2336,7 @@ static void __init prom_check_displays(void)
 		prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
 			    width, height, pitch, addr);
 		btext_setup_display(width, height, 8, pitch, addr);
+		btext_prepare_BAT();
 	}
 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 }
...
...@@ -24,7 +24,7 @@ fi ...@@ -24,7 +24,7 @@ fi
WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
_end enter_prom $MEM_FUNCS reloc_offset __secondary_hold _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start __secondary_hold_acknowledge __secondary_hold_spinloop __start
logo_linux_clut224 logo_linux_clut224 btext_prepare_BAT
reloc_got2 kernstart_addr memstart_addr linux_banner _stext reloc_got2 kernstart_addr memstart_addr linux_banner _stext
__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC." __prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
...
@@ -112,6 +112,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
+	 *
+	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
+	 * a special case check in pmd_access_permitted.
 	 */
 	serialize_against_pte_lookup(vma->vm_mm);
 	return __pmd(old_pmd);
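The ordering on the invalidate side is what the added comment is really about: the pmd is first marked invalid-but-not-present, and only then does serialize_against_pte_lookup() interrupt every CPU, so that any lock-free walker (which runs with interrupts disabled) is known to have finished before the split continues. A narrated sketch of that ordering, with invented function names rather than the kernel's internals:

#include <stdio.h>

/* None of these functions are the kernel's; they only narrate the order
 * that matters for __find_linux_pte() and pmd_access_permitted(). */
static void clear_present_set_invalid(void)
{
	puts("1. pmd: clear _PAGE_PRESENT, set _PAGE_INVALID");
}

static void serialize_against_pte_lookup(void)
{
	puts("2. IPI all CPUs: wait for lock-free walkers (IRQs off) to finish");
}

static void proceed_with_split(void)
{
	puts("3. safe to rewrite the pmd and carry on with the split");
}

int main(void)
{
	clear_present_set_invalid();	/* new walkers now see a serializing pmd and bail */
	serialize_against_pte_lookup();	/* old walkers are guaranteed to have finished */
	proceed_with_split();
	return 0;
}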
...
@@ -368,13 +368,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		pdshift = PMD_SHIFT;
 		pmdp = pmd_offset(&pud, ea);
 		pmd = READ_ONCE(*pmdp);
+
 		/*
-		 * A hugepage collapse is captured by pmd_none, because
-		 * it mark the pmd none and do a hpte invalidate.
+		 * A hugepage collapse is captured by this condition, see
+		 * pmdp_collapse_flush.
 		 */
 		if (pmd_none(pmd))
 			return NULL;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+		/*
+		 * A hugepage split is captured by this condition, see
+		 * pmdp_invalidate.
+		 *
+		 * Huge page modification can be caught here too.
+		 */
+		if (pmd_is_serializing(pmd))
+			return NULL;
+#endif
+
 		if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
 			if (is_thp)
 				*is_thp = true;
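The reader side bails out on both a collapsing (pmd_none) and a splitting (pmd_is_serializing) entry, and it makes every decision from the single READ_ONCE snapshot rather than re-reading the slot. A toy model of that single-snapshot rule, with invented names and made-up bit values:

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_INVALID (1ULL << 1)

static uint64_t slot = _PAGE_PRESENT;	/* stand-in for a shared page-table slot */

static int walk_once(void)
{
	/* In the kernel this is READ_ONCE(*pmdp); volatile models "read once". */
	uint64_t pmd = *(volatile uint64_t *)&slot;

	if (pmd == 0)
		return -1;	/* "pmd_none": collapse in progress */
	if ((pmd & (_PAGE_PRESENT | _PAGE_INVALID)) == _PAGE_INVALID)
		return -1;	/* "pmd_is_serializing": split in progress */

	/* From here on only the snapshot is used, never the slot again. */
	printf("using snapshot 0x%llx\n", (unsigned long long)pmd);
	return 0;
}

int main(void)
{
	walk_once();			/* succeeds */
	slot = _PAGE_INVALID;		/* pretend pmdp_invalidate() ran */
	walk_once();			/* now bails out instead of touching the slot */
	return 0;
}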
...