Commit a9815a4f authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A set of x86 fixes and functional updates:

   - Prevent stale huge I/O TLB mappings on 32-bit. A long-standing bug
     which got exposed by KPTI support for 32-bit

   - Prevent bogus access_ok() warnings in arch_stack_walk_user()

   - Add display quirks for Lenovo devices which have height and width
     swapped

   - Add the missing CR2 fixup for 32-bit async page faults. Fallout of
     the CR2 bug fix series.

   - Unbreak handling of force-enabled HPET by moving the 'is HPET
     counting' check back to its original place.

   - A more accurate check for running on a hypervisor platform in the
     MDS mitigation code. Not perfect, but more accurate than the
     previous one.

   - Update a stale and confusing comment about IRQ stacks"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation/mds: Apply more accurate check on hypervisor platform
  x86/hpet: Undo the early counter is counting check
  x86/entry/32: Pass cr2 to do_async_page_fault()
  x86/irq/64: Update stale comment
  x86/sysfb_efi: Add quirks for some devices with swapped width and height
  x86/stacktrace: Prevent access_ok() warnings in arch_stack_walk_user()
  mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
  x86/mm: Sync also unmappings in vmalloc_sync_all()
  x86/mm: Check for pfn instead of page in vmalloc_sync_one()
parents e24ce84e 517c3ba0
arch/x86/entry/entry_32.S:

@@ -1443,8 +1443,12 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
 ENTRY(page_fault)
 	ASM_CLAC
-	pushl	$0; /* %gs's slot on the stack */
+	pushl	$do_page_fault
+	jmp	common_exception_read_cr2
+END(page_fault)
+
+common_exception_read_cr2:
+	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1
 	ENCODE_FRAME_POINTER
@@ -1452,6 +1456,7 @@ ENTRY(page_fault)
 	/* fixup %gs */
 	GS_TO_REG %ecx
+	movl	PT_GS(%esp), %edi
 	REG_TO_PTGS %ecx
 	SET_KERNEL_GS %ecx
@@ -1463,9 +1468,9 @@ ENTRY(page_fault)
 	TRACE_IRQS_OFF
 	movl	%esp, %eax		# pt_regs pointer
-	call	do_page_fault
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
-END(page_fault)
+END(common_exception_read_cr2)

 common_exception:
 	/* the function address is in %gs's slot on the stack */
@@ -1595,7 +1600,7 @@ END(general_protection)
 ENTRY(async_page_fault)
 	ASM_CLAC
 	pushl	$do_async_page_fault
-	jmp	common_exception
+	jmp	common_exception_read_cr2
 END(async_page_fault)
 #endif
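The shape of this change is worth spelling out: page_fault now pushes its C handler's address into the stack slot that SAVE_ALL later treats as pt_regs->gs, and the shared common_exception_read_cr2 prologue fetches that address into %edi before %gs is rewritten, reads CR2 while interrupts are still disabled, and dispatches through CALL_NOSPEC (the speculation-safe indirect call). That is how do_page_fault and do_async_page_fault come to share one CR2-capturing prologue. A rough, self-contained C model of the dispatch, with purely illustrative names:

#include <stdio.h>

/* Toy stand-in for the one pt_regs field this trick relies on. */
struct pt_regs_sketch {
	unsigned long gs_slot;	/* handler address parked where %gs would go */
};

typedef void (*fault_handler_t)(struct pt_regs_sketch *regs, unsigned long cr2);

static void do_page_fault_sketch(struct pt_regs_sketch *regs, unsigned long cr2)
{
	(void)regs;
	printf("page fault, faulting address %#lx\n", cr2);
}

int main(void)
{
	/* entry stub: "pushl $do_page_fault" parks the handler address */
	struct pt_regs_sketch regs = {
		.gs_slot = (unsigned long)do_page_fault_sketch,
	};
	/* common_exception_read_cr2: fetch the handler, then read CR2 */
	fault_handler_t handler = (fault_handler_t)regs.gs_slot;
	unsigned long fake_cr2 = 0x1000;	/* stands in for GET_CR2_INTO */

	handler(&regs, fake_cr2);	/* CALL_NOSPEC %edi in the real code */
	return 0;
}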
arch/x86/kernel/cpu/bugs.c:

@@ -1226,7 +1226,7 @@ static ssize_t l1tf_show_state(char *buf)
 static ssize_t mds_show_state(char *buf)
 {
-	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		return sprintf(buf, "%s; SMT Host state unknown\n",
 			       mds_strings[mds_mitigation]);
 	}
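The reasoning behind this one-liner: hypervisor_is_type() only knows about hypervisors the kernel can positively identify, so an unrecognized hypervisor still reads as X86_HYPER_NATIVE, whereas X86_FEATURE_HYPERVISOR reflects the CPUID hypervisor bit (leaf 1, ECX bit 31) that any well-behaved hypervisor sets. A minimal user-space sketch of reading that bit (not kernel code, just the underlying CPUID check):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1: the hypervisor-present bit is ECX bit 31. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("running under a hypervisor: %s\n",
	       (ecx & (1u << 31)) ? "yes" : "no");
	return 0;
}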
arch/x86/kernel/head_64.S:

@@ -193,10 +193,10 @@ ENTRY(secondary_startup_64)
 	/* Set up %gs.
 	 *
-	 * The base of %gs always points to the bottom of the irqstack
-	 * union.  If the stack protector canary is enabled, it is
-	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
-	 * init data section till per cpu areas are set up.
+	 * The base of %gs always points to fixed_percpu_data. If the
+	 * stack protector canary is enabled, it is located at %gs:40.
+	 * Note that, on SMP, the boot cpu uses init data section until
+	 * the per cpu areas are set up.
 	 */
 	movl	$MSR_GS_BASE,%ecx
 	movl	initial_gs(%rip),%eax
arch/x86/kernel/hpet.c:

@@ -827,10 +827,6 @@ int __init hpet_enable(void)
 	if (!hpet_cfg_working())
 		goto out_nohpet;

-	/* Validate that the counter is counting */
-	if (!hpet_counting())
-		goto out_nohpet;
-
 	/*
 	 * Read the period and check for a sane value:
 	 */
@@ -896,6 +892,14 @@ int __init hpet_enable(void)
 	}
 	hpet_print_config();

+	/*
+	 * Validate that the counter is counting. This needs to be done
+	 * after sanitizing the config registers to properly deal with
+	 * force enabled HPETs.
+	 */
+	if (!hpet_counting())
+		goto out_nohpet;
+
 	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);

 	if (id & HPET_ID_LEGSUP) {
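Moving the check is the whole fix: on force-enabled HPETs the counter may only start counting once the config registers have been sanitized, so probing it earlier rejected perfectly usable hardware. The check itself boils down to "sample the counter twice and demand forward progress". Here is a self-contained sketch of that idea, with CLOCK_MONOTONIC standing in for the HPET main counter register and an arbitrary short wait substituted for the kernel's timeout loop:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for an MMIO read of the HPET main counter. */
static uint64_t read_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Sketch of the "is the counter counting?" probe. */
static bool counter_is_counting(void)
{
	const struct timespec delay = { 0, 100 * 1000 };	/* short wait for this sketch */
	uint64_t start = read_counter();

	nanosleep(&delay, NULL);
	return read_counter() != start;
}

int main(void)
{
	printf("counter is counting: %s\n", counter_is_counting() ? "yes" : "no");
	return 0;
}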
arch/x86/kernel/stacktrace.c:

@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 {
 	int ret;

-	if (!access_ok(fp, sizeof(*frame)))
+	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
 		return 0;

 	ret = 1;
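Context for the substitution: on x86, access_ok() carries a WARN_ON_IN_IRQ() sanity check, and arch_stack_walk_user() can legitimately run from interrupt context (e.g. perf sampling), so the warning fired spuriously; __range_not_ok() performs the same address-range validation without that side effect. A self-contained sketch of the raw range check, with illustrative names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reject a user range if addr + size overflows or crosses the limit. */
static bool range_not_ok(uintptr_t addr, size_t size, uintptr_t limit)
{
	uintptr_t end;

	if (__builtin_add_overflow(addr, size, &end))
		return true;	/* wrapped around the address space */
	return end > limit;
}

int main(void)
{
	printf("%d\n", range_not_ok(0xff00, 0x20, 0x10000));		/* 0: in range */
	printf("%d\n", range_not_ok(UINTPTR_MAX - 4, 16, UINTPTR_MAX));	/* 1: wraps */
	return 0;
}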
arch/x86/kernel/sysfb_efi.c:

@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
 	{},
 };

+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+	{
+		/*
+		 * Lenovo MIIX310-10ICR, only some batches have the troublesome
+		 * 800x1280 portrait screen. Luckily the portrait version has
+		 * its own BIOS version, so we match on that.
+		 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+		},
+	},
+	{
+		/* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo MIIX 320-10ICR"),
+		},
+	},
+	{
+		/* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo ideapad D330-10IGM"),
+		},
+	},
+	{},
+};
+
 __init void sysfb_apply_efi_quirks(void)
 {
 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
 	    !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
 		dmi_check_system(efifb_dmi_system_table);
+
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+	    dmi_check_system(efifb_dmi_swap_width_height)) {
+		u16 temp = screen_info.lfb_width;
+
+		screen_info.lfb_width = screen_info.lfb_height;
+		screen_info.lfb_height = temp;
+		screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+	}
 }
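Note the stride recomputation at the end of the quirk: lfb_linelength is rebuilt as 4 * width, i.e. it assumes a 32-bit-per-pixel framebuffer. A tiny self-contained illustration of the swap, with made-up values and a trimmed-down stand-in for screen_info:

#include <stdint.h>
#include <stdio.h>

struct screen_info_sketch {
	uint16_t lfb_width, lfb_height, lfb_linelength;
};

int main(void)
{
	/* firmware advertised a landscape mode on a portrait panel */
	struct screen_info_sketch si = { .lfb_width = 1280, .lfb_height = 800 };
	uint16_t temp = si.lfb_width;

	si.lfb_width = si.lfb_height;
	si.lfb_height = temp;
	si.lfb_linelength = 4 * si.lfb_width;	/* 32bpp assumption */

	printf("%ux%u, stride %u bytes\n", (unsigned)si.lfb_width,
	       (unsigned)si.lfb_height, (unsigned)si.lfb_linelength);
	return 0;
}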
arch/x86/mm/fault.c:

@@ -177,13 +177,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);

-	if (!pmd_present(*pmd_k))
-		return NULL;
-
-	if (!pmd_present(*pmd))
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 		set_pmd(pmd, *pmd_k);
+
+	if (!pmd_present(*pmd_k))
+		return NULL;
 	else
-		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

 	return pmd_k;
 }
@@ -203,17 +204,13 @@ void vmalloc_sync_all(void)
 	spin_lock(&pgd_lock);
 	list_for_each_entry(page, &pgd_list, lru) {
 		spinlock_t *pgt_lock;
-		pmd_t *ret;

 		/* the pgt_lock only for Xen */
 		pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

 		spin_lock(pgt_lock);
-		ret = vmalloc_sync_one(page_address(page), address);
+		vmalloc_sync_one(page_address(page), address);
 		spin_unlock(pgt_lock);
-
-		if (!ret)
-			break;
 	}
 	spin_unlock(&pgd_lock);
 }
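The behavioral change in vmalloc_sync_one() is subtle but central to the series: the old code only copied a PMD into page tables that were missing it, while the new presence-inequality test also clears entries whose reference copy has gone away, i.e. it propagates unmappings too. A toy model of that rule, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Toy model: a "pmd" is a plain value, 0 meaning not present. */
static void sync_one_sketch(unsigned long *pmd, const unsigned long *pmd_k)
{
	bool present = (*pmd != 0);
	bool present_k = (*pmd_k != 0);

	if (present != present_k)
		*pmd = *pmd_k;	/* installs a new entry OR clears a stale one */
}

int main(void)
{
	unsigned long stale = 0xabc000;	/* this table still maps the area */
	unsigned long ref = 0;		/* the reference table unmapped it */

	sync_one_sketch(&stale, &ref);
	printf("stale pmd after sync: %#lx\n", stale);	/* 0: unmapping synced */
	return 0;
}

That is also why vmalloc_sync_all() stops breaking out of the loop early: a NULL return from vmalloc_sync_one() no longer implies the remaining page tables are already in sync, so every PGD on the list must be visited.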
mm/vmalloc.c:

@@ -1258,6 +1258,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	if (unlikely(valist == NULL))
 		return false;

+	/*
+	 * First make sure the mappings are removed from all page-tables
+	 * before they are freed.
+	 */
+	vmalloc_sync_all();
+
 	/*
 	 * TODO: to calculate a flush range without looping.
 	 * The list can be up to lazy_max_pages() elements.
@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
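The placement of the call is the point: the sync happens after the lazy-purge list has been collected but before anything is flushed and freed, so no page table in the system can still reference the memory once it is handed back. A toy, self-contained sketch of the required ordering (all names illustrative):

#include <stdio.h>

static void collect_lazy_areas(void)   { puts("1. collect lazy vmap areas"); }
static void sync_all_page_tables(void) { puts("2. drop stale entries in every page table"); }
static void flush_tlb(void)            { puts("3. flush the TLB"); }
static void free_backing_pages(void)   { puts("4. free the backing pages"); }

int main(void)
{
	/* Freeing before syncing would recreate the bug: another task's
	 * page table could keep a stale huge mapping of freed memory. */
	collect_lazy_areas();
	sync_all_page_tables();
	flush_tlb();
	free_backing_pages();
	return 0;
}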