Commit aadaa806 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "A handful of fixes:

   - Fix an MCE corner case bug/crash found via MCE injection testing

   - Fix 5-level paging boot crash

   - Fix MCE recovery cache invalidation bug

   - Fix regression on Xen guests caused by a recent PMD level mremap
     speedup optimization"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Make set_pmd_at() paravirt aware
  x86/mm/cpa: Fix set_mce_nospec()
  x86/boot/compressed/64: Do not corrupt EDX on EFER.LME=1 setting
  x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()
parents 73a4c521 20e55bc1
@@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src)
 3:
 	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
 	pushl	%ecx
+	pushl	%edx
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	btsl	$_EFER_LME, %eax
 	wrmsr
+	popl	%edx
 	popl	%ecx
 
 	/* Enable PAE and LA57 (if required) paging modes */
...
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-	native_set_pmd(pmdp, pmd);
+	set_pmd(pmdp, pmd);
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
...
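For context on the set_pmd_at() change above: on bare metal, set_pmd() and native_set_pmd() end up doing the same direct write, but when paravirtualization support is built in, set_pmd() is routed through the paravirt MMU hooks. Xen PV guests depend on that indirection because their page tables are mapped read-only and must be updated via the hypervisor. The sketch below is a rough userspace illustration of the idea only; every name in it (pmd_entry_t, demo_*) is made up for this example and is not the kernel's definition.

#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t val; } pmd_entry_t;

/* Bare-metal backend: write the page-table entry directly. */
static void demo_native_set_pmd(pmd_entry_t *pmdp, pmd_entry_t pmd)
{
	pmdp->val = pmd.val;
}

/* Xen-PV-style backend: the update has to go through the hypervisor. */
static void demo_xen_set_pmd(pmd_entry_t *pmdp, pmd_entry_t pmd)
{
	printf("hypercall: set pmd at %p to %#llx\n",
	       (void *)pmdp, (unsigned long long)pmd.val);
	pmdp->val = pmd.val;	/* stand-in for the hypervisor performing the write */
}

/* Paravirt-style indirection: early boot points this at the right backend. */
static void (*demo_set_pmd)(pmd_entry_t *, pmd_entry_t) = demo_native_set_pmd;

int main(void)
{
	pmd_entry_t slot = { 0 };
	pmd_entry_t newval = { 0x1234000ULL | 0x067 };	/* frame | flag bits */

	demo_set_pmd = demo_xen_set_pmd;	/* pretend we booted as a Xen PV guest */

	demo_set_pmd(&slot, newval);		/* what a paravirt-aware set_pmd_at() does */
	demo_native_set_pmd(&slot, newval);	/* bypasses the hook: breaks on Xen PV */
	return 0;
}

Calling the native helper directly, as the PMD-level mremap speedup ended up doing, skips the hypervisor path entirely, which is the regression this one-liner fixes.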
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 			quirk_no_way_out(i, m, regs);
 
 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			m->bank = i;
 			mce_read_aux(m, i);
 			*msg = tmp;
 			return 1;
...
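The one-line mce_no_way_out() addition records which MC bank the fatal error was found in before the struct is handed on to the reporting/panic path; otherwise the bank field is whatever it was left at. A minimal sketch of that bug class follows; the struct and names are hypothetical, not the kernel's struct mce.

#include <stdio.h>

struct demo_mce {
	unsigned long long status;
	int bank;		/* which MC bank the error was read from */
};

static void demo_report_fatal(const struct demo_mce *m)
{
	printf("fatal machine check in bank %d, status %#llx\n",
	       m->bank, m->status);
}

int main(void)
{
	struct demo_mce m = { 0 };	/* zero-initialized template */
	int i = 3;			/* bank currently being scanned */

	m.status = 0xbe00000000800400ULL;

	/* Without this assignment the report would always claim bank 0. */
	m.bank = i;
	demo_report_fatal(&m);
	return 0;
}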
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+	return (long)(addr << 1) >> 1;
+#else
+	return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
 	if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
 	unsigned int i;
 
 	for (i = 0; i < cpa->numpages; i++)
-		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
+		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
 		 * Only flush present addresses:
 		 */
 		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
 	}
 
 	mb();
 }
@@ -1627,29 +1650,6 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 	return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-	return (long)(addr << 1) >> 1;
-#else
-	return addr;
-#endif
-}
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,
...
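The fix_addr() helper added above replaces make_addr_canonical_again() and is now applied to the TLB flush as well as the cache flush, so neither INVLPG nor CLFLUSH ever sees the deliberately non-canonical address. The shift trick it uses can be checked in a small userspace sketch, assuming a 64-bit unsigned long and an arithmetic right shift on signed values (the same assumptions the kernel code makes); demo_fix_addr below simply mirrors the kernel expression.

#include <stdio.h>

/* Same expression as the kernel helper above. */
static unsigned long demo_fix_addr(unsigned long addr)
{
	return (long)(addr << 1) >> 1;
}

int main(void)
{
	unsigned long canonical = 0xffff888001234000UL;		/* typical 1:1-map address */
	unsigned long flipped   = canonical & ~(1UL << 63);	/* top bit cleared: non-canonical */

	printf("flipped:   %#lx -> %#lx\n", flipped, demo_fix_addr(flipped));
	printf("canonical: %#lx -> %#lx\n", canonical, demo_fix_addr(canonical));
	return 0;
}

The left shift drops bit 63 and the arithmetic right shift copies bit 62 back into it, so the flipped address comes back as 0xffff888001234000 while the already-canonical address passes through unchanged.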