Commit 7006fe2f authored by Linus Torvalds

Merge tag 'x86-urgent-2024-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:

 - Fix 32-bit PTI for real.

   pti_clone_entry_text() is called twice, once before initcalls so that
   initcalls can use the user-mode helper and then again after text is
   set read-only. Setting it read-only on 32-bit might break up the PMD
   mapping, which makes the second invocation of pti_clone_entry_text()
   find the mappings out of sync and fail.

   Allow the second call to split the existing PMDs in the user mapping
   and synchronize with the kernel mapping.

 - Don't make acpi_mp_wake_mailbox read-only after init, as the mailbox
   must remain writable in case CPU hotplug operations happen after
   boot. Otherwise the attempt to start a CPU crashes with a write to
   read-only memory.

 - Add a missing sanity check in mtrr_save_state() to ensure that the
   fixed MTRR MSRs are supported.

   Otherwise mtrr_save_state() ends up in a #GP, which is fixed up, but
   the WARN_ON() can bring systems down when panic_on_warn is set.

* tag 'x86-urgent-2024-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mtrr: Check if fixed MTRRs exist before saving them
  x86/paravirt: Fix incorrect virt spinlock setting on bare metal
  x86/acpi: Remove __ro_after_init from acpi_mp_wake_mailbox
  x86/mm: Fix PTI for i386 some more
parents 7270e931 919f18f9

--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)
 
 #ifdef CONFIG_PARAVIRT
 /*
- * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
  *
- * Native (and PV wanting native due to vCPU pinning) should disable this key.
- * It is done in this backwards fashion to only have a single direction change,
- * which removes ordering between native_pv_spin_init() and HV setup.
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
  */
-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
 
 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows
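
Reviewer note: as a rough userspace sketch of the shortcut this key gates
(not the kernel code; the bool and the atomic_flag loop are illustrative
stand-ins for the static key and the qspinlock fastpath), only a guest that
enabled the key takes the simple test-and-set loop; everyone else falls
through to the queued slowpath:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool virt_spin_lock_key; /* models DECLARE_STATIC_KEY_FALSE: off by default */

/* Models the virt_spin_lock() hijack: take a plain test-and-set spin
 * loop (friendlier when vCPUs can be preempted) only if the key is on. */
static bool virt_spin_lock(atomic_flag *lock)
{
        if (!virt_spin_lock_key)
                return false;   /* bare metal: use the queued slowpath */

        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
                ;               /* simple TAS spin while in a guest */

        return true;
}

int main(void)
{
        atomic_flag lock = ATOMIC_FLAG_INIT;
        /* The key was never enabled, so the hijack is skipped. */
        printf("hijack taken: %s\n", virt_spin_lock(&lock) ? "yes" : "no");
        return 0;
}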

--- a/arch/x86/kernel/acpi/madt_wakeup.c
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -19,7 +19,7 @@
 static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;
 
 /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
-static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init;
+static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
 
 static u64 acpi_mp_pgd __ro_after_init;
 static u64 acpi_mp_reset_vector_paddr __ro_after_init;
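
The crash mode this removes is easy to model in userspace: __ro_after_init
places a variable in a section the kernel seals read-only once init
completes, so a later write (here, CPU hotplug touching the mailbox) faults.
A hedged sketch, with mprotect() standing in for that end-of-init sealing
and the buffer as an illustrative "mailbox":

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        /* A page-aligned "mailbox", analogous to acpi_mp_wake_mailbox. */
        unsigned char *mailbox = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mailbox == MAP_FAILED)
                return 1;

        mailbox[0] = 1;                         /* fine: "during init" */
        mprotect(mailbox, pagesz, PROT_READ);   /* models end-of-init RO sealing */
        /* mailbox[0] = 2;  <- would SIGSEGV, like the post-boot hotplug
         *                     crash fixed by dropping __ro_after_init */
        printf("mailbox sealed read-only; a post-init write would fault\n");
        munmap(mailbox, pagesz);
        return 0;
}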

--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -609,7 +609,7 @@ void mtrr_save_state(void)
 {
 	int first_cpu;
 
-	if (!mtrr_enabled())
+	if (!mtrr_enabled() || !mtrr_state.have_fixed)
 		return;
 
 	first_cpu = cpumask_first(cpu_online_mask);
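
The fix is a guard clause: don't touch the fixed-range MTRR MSRs unless the
CPU reports them, because RDMSR on an unsupported MSR raises #GP. A minimal
sketch of that control flow; struct mtrr_state_model and the function below
are hypothetical stand-ins for the kernel's mtrr_state and mtrr_save_state():

#include <stdbool.h>
#include <stdio.h>

struct mtrr_state_model {
        bool enabled;
        bool have_fixed;        /* fixed-range support bit from the MTRRcap MSR */
};

static void mtrr_save_state_model(const struct mtrr_state_model *s)
{
        /* Without this guard, reading a fixed-range MTRR MSR on a CPU
         * lacking them raises #GP; the fixup catches it, but the WARN_ON()
         * can still take the box down with panic_on_warn set. */
        if (!s->enabled || !s->have_fixed)
                return;
        printf("saving fixed-range MTRRs on the first online CPU\n");
}

int main(void)
{
        struct mtrr_state_model s = { .enabled = true, .have_fixed = false };
        mtrr_save_state_model(&s);      /* quietly returns instead of #GP */
        return 0;
}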

--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -51,13 +51,12 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif
 
-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
 
 void __init native_pv_lock_init(void)
 {
-	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
-	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		static_branch_disable(&virt_spin_lock_key);
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&virt_spin_lock_key);
 }
 
 static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
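
The behavioural point of the polarity flip: with a default-false key, bare
metal never writes the key at all, and only a guest enables it (KVM/Xen may
still disable it again later in boot). A small userspace model of the new
init path, with a plain bool standing in for the static branch:

#include <stdbool.h>
#include <stdio.h>

static bool virt_spin_lock_key;         /* models DEFINE_STATIC_KEY_FALSE */

static void native_pv_lock_init_model(bool cpu_has_hypervisor)
{
        /* Only a guest flips the key on; native leaves it untouched, so
         * there is no enable-then-disable ordering to get wrong and no
         * stray virt spinlock behaviour on bare metal. */
        if (cpu_has_hypervisor)
                virt_spin_lock_key = true;
}

int main(void)
{
        native_pv_lock_init_model(false);       /* bare metal boot */
        printf("bare metal: virt_spin_lock() hijack %s\n",
               virt_spin_lock_key ? "enabled" : "disabled");
        return 0;
}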

--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pmd_t *pmd;
@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 	if (!pmd)
 		return NULL;
 
-	/* We can't do anything sensible if we hit a large mapping. */
+	/* Large PMD mapping found */
 	if (pmd_leaf(*pmd)) {
-		WARN_ON(1);
-		return NULL;
+		/* Clear the PMD if we hit a large mapping from the first round */
+		if (late_text) {
+			set_pmd(pmd, __pmd(0));
+		} else {
+			WARN_ON_ONCE(1);
+			return NULL;
+		}
 	}
 
 	if (pmd_none(*pmd)) {
@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
 		return;
 
-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
 	if (WARN_ON(!target_pte))
 		return;
 
@@ -301,7 +306,7 @@ enum pti_clone_level {
 
 static void
 pti_clone_pgtable(unsigned long start, unsigned long end,
-		  enum pti_clone_level level)
+		  enum pti_clone_level level, bool late_text)
 {
 	unsigned long addr;
 
@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 				return;
 
 			/* Allocate PTE in the user page-table */
-			target_pte = pti_user_pagetable_walk_pte(addr);
+			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
 			if (WARN_ON(!target_pte))
 				return;
 
@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
 		pte_t *target_pte;
 
-		target_pte = pti_user_pagetable_walk_pte(va);
+		target_pte = pti_user_pagetable_walk_pte(va, false);
 		if (WARN_ON(!target_pte))
 			return;
 
@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
 	start = CPU_ENTRY_AREA_BASE;
 	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
 
-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
 }
 #endif /* CONFIG_X86_64 */
 
@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
 /*
  * Clone the populated PMDs of the entry text and force it RO.
  */
-static void pti_clone_entry_text(void)
+static void pti_clone_entry_text(bool late)
 {
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
-			  PTI_LEVEL_KERNEL_IMAGE);
+			  PTI_LEVEL_KERNEL_IMAGE, late);
 }
 
 /*
@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
 	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
 	 */
-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
 
 	/*
 	 * pti_clone_pgtable() will set the global bit in any PMDs
@@ -638,8 +643,15 @@ void __init pti_init(void)
 	/* Undo all global bits from the init pagetables in head_64.S: */
 	pti_set_kernel_image_nonglobal();
 	/* Replace some of the global bits just for shared entry text: */
-	pti_clone_entry_text();
+	/*
+	 * This is very early in boot. Device and Late initcalls can do
+	 * modprobe before free_initmem() and mark_readonly(). This
+	 * pti_clone_entry_text() allows those user-mode-helpers to function,
+	 * but notably the text is still RW.
+	 */
+	pti_clone_entry_text(false);
 	pti_setup_espfix64();
 	pti_setup_vsyscall();
 }
@@ -656,10 +668,11 @@ void pti_finalize(void)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 	/*
-	 * We need to clone everything (again) that maps parts of the
-	 * kernel image.
+	 * This is after free_initmem() (all initcalls are done) and we've done
+	 * mark_readonly(). Text is now NX which might've split some PMDs
+	 * relative to the early clone.
 	 */
-	pti_clone_entry_text();
+	pti_clone_entry_text(true);
 	pti_clone_kernel_text();
 
 	debug_checkwx_user();
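
Putting the PTI change together: the early clone runs while text is still RW
and must not meet a leaf PMD in the user table; the late clone, after
mark_readonly() may have split the kernel-side PMDs, is now allowed to clear
such a PMD and re-clone in sync. A hedged userspace model of that two-phase
ordering (the function and flag below are illustrative stand-ins, not the
kernel's code):

#include <stdbool.h>
#include <stdio.h>

/* Models "the user-side clone of the entry text still holds a leaf PMD
 * while the kernel side has been split to 4K pages". */
static bool user_map_has_leaf_pmd;

/* Stand-in for pti_clone_entry_text(); returns false where the kernel
 * would WARN and bail out. */
static bool clone_entry_text(bool late)
{
        if (user_map_has_leaf_pmd) {
                if (!late)
                        return false;           /* early pass: WARN_ON_ONCE() path */
                /* late pass: set_pmd(pmd, __pmd(0)), then re-clone as PTEs */
                user_map_has_leaf_pmd = false;
        }
        return true;    /* user mapping back in sync with the kernel mapping */
}

int main(void)
{
        clone_entry_text(false);        /* pti_init(): early, text still RW */
        /* ... initcalls run, mark_readonly() splits kernel PMDs ... */
        user_map_has_leaf_pmd = true;   /* user clone is now out of sync */
        printf("late clone %s\n", clone_entry_text(true) ? "ok" : "failed");
        return 0;
}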