Commit 4b696dcb authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "This update contains:

   - Hopefully the last ASM CLAC fixups

   - A fix for the Quark family related to the IMR lock which makes
     kexec work again

   - An off-by-one fix in the MPX code.  Ironic, isn't it?

   - A fix for X86_PAE which addresses once more an unsigned long vs
     phys_addr_t hiccup"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mpx: Fix off-by-one comparison with nr_registers
  x86/mm: Fix slow_virt_to_phys() for X86_PAE again
  x86/entry/compat: Add missing CLAC to entry_INT80_32
  x86/entry/32: Add an ASM_CLAC to entry_SYSENTER_32
  x86/platform/intel/quark: Change the kernel's IMR lock bit to false
parents 76c03f0f 9bf148cb
@@ -294,6 +294,7 @@ sysenter_past_esp:
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 	pushfl				/* pt_regs->flags (except IF = 0) */
+	ASM_CLAC			/* Clear AC after saving FLAGS */
 	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 	pushl	$__USER_CS		/* pt_regs->cs */
 	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
 	 * Interrupts are off on entry.
 	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	ASM_CLAC			/* Do this early to minimize exposure */
 	SWAPGS

 	/*
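Both ASM_CLAC additions above plug the same hole: EFLAGS.AC doubles as the legacy alignment-check flag, so user space can enter the kernel with AC already set, and neither SYSENTER nor INT80 clears it. With SMAP enabled, a kernel running with AC=1 may touch user memory freely, which defeats the protection; in the 32-bit path the CLAC sits after pushfl so the flags value saved into pt_regs (and later restored to user space) still reflects the user's own AC setting. A minimal user-space C sketch of the idea (a toy model only, not kernel code; struct cpu_model and user_access_allowed() are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: with SMAP, kernel accesses to user memory are only legal
 * while EFLAGS.AC is set (normally only inside stac()/clac() pairs). */
struct cpu_model {
	bool ac;	/* EFLAGS.AC as the kernel sees it */
};

static bool user_access_allowed(const struct cpu_model *cpu)
{
	return cpu->ac;
}

int main(void)
{
	/* User space can legitimately set AC (alignment checking) before
	 * issuing SYSENTER or INT80, and the CPU does not clear it. */
	struct cpu_model cpu = { .ac = true };

	printf("no CLAC at entry: stray user access allowed = %d\n",
	       user_access_allowed(&cpu));

	cpu.ac = false;		/* what the added ASM_CLAC does */
	printf("CLAC at entry:    stray user access allowed = %d\n",
	       user_access_allowed(&cpu));
	return 0;
}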
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 		break;
 	}

-	if (regno > nr_registers) {
+	if (regno >= nr_registers) {
 		WARN_ONCE(1, "decoded an instruction with an invalid register");
 		return -EINVAL;
 	}
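The MPX hunk above is a plain bounds-check off-by-one: regno indexes a table with nr_registers entries, so the valid indices are 0 through nr_registers - 1 and the rejection test has to be >=. A small standalone sketch of the boundary case (NR_REGISTERS and the two helpers are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NR_REGISTERS 16	/* illustrative stand-in for nr_registers */

/* Old check: accepts regno == NR_REGISTERS, one past the last valid index. */
static bool valid_old(unsigned int regno) { return !(regno > NR_REGISTERS); }

/* Fixed check: valid indices are 0 .. NR_REGISTERS - 1 only. */
static bool valid_new(unsigned int regno) { return !(regno >= NR_REGISTERS); }

int main(void)
{
	unsigned int regno = NR_REGISTERS;	/* the boundary case */

	printf("regno = %u, table size = %u\n", regno, NR_REGISTERS);
	printf("old check accepts it:   %s (would index one past the end)\n",
	       valid_old(regno) ? "yes" : "no");
	printf("fixed check rejects it: %s\n",
	       valid_new(regno) ? "no" : "yes");
	return 0;
}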
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
 	unsigned long virt_addr = (unsigned long)__virt_addr;
-	unsigned long phys_addr, offset;
+	phys_addr_t phys_addr;
+	unsigned long offset;
 	enum pg_level level;
 	pte_t *pte;

 	pte = lookup_address(virt_addr, &level);
 	BUG_ON(!pte);

+	/*
+	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
+	 * make 32-PAE kernel work correctly.
+	 */
 	switch (level) {
 	case PG_LEVEL_1G:
-		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PUD_PAGE_MASK;
 		break;
 	case PG_LEVEL_2M:
-		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PMD_PAGE_MASK;
 		break;
 	default:
-		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PAGE_MASK;
 	}
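The slow_virt_to_phys() hunk matters because with X86_PAE an unsigned long is 32 bits while physical addresses can exceed 4 GiB (phys_addr_t is 64-bit there), and the pfn helpers return unsigned long; shifting that value left by PAGE_SHIFT in 32-bit arithmetic silently drops the upper bits, so the cast has to happen before the shift. A user-space sketch of the truncation (the example pfn value is arbitrary):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* A page frame just above the 4 GiB boundary, i.e. phys 0x100001000. */
	uint32_t pfn = 0x100001;	/* pXX_pfn() returns unsigned long: 32-bit on PAE */

	/* Old code: the shift happens in 32-bit arithmetic, high bits are lost. */
	uint32_t truncated = pfn << PAGE_SHIFT;

	/* Fixed code: widen to the 64-bit physical address type first. */
	uint64_t correct = (uint64_t)pfn << PAGE_SHIFT;

	printf("truncated: 0x%08" PRIx32 "\n", truncated);	/* 0x00001000 */
	printf("correct:   0x%09" PRIx64 "\n", correct);	/* 0x100001000 */
	return 0;
}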
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	end = (unsigned long)__end_rodata - 1;

 	/*
-	 * Setup a locked IMR around the physical extent of the kernel
+	 * Setup an unlocked IMR around the physical extent of the kernel
 	 * from the beginning of the .text section to the end of the
 	 * .rodata section as one physically contiguous block.
 	 *
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 			size / 1024, start, end);
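The Quark fix works because an IMR whose lock bit is set cannot be modified or removed again until the next reset; a kexec'd kernel is placed at a different physical range and can no longer tear down or update the stale IMR covering the old kernel's text, which is what broke kexec. A toy C model of the lock-bit semantics (struct imr_slot, imr_setup() and imr_teardown() are invented for illustration and are not the driver's API; only the final bool argument mirrors imr_add_range()'s lock parameter):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of one Isolated Memory Region register set. */
struct imr_slot {
	uint32_t lo, hi;	/* protected physical range */
	bool in_use;
	bool locked;		/* once set, only a reset clears it */
};

/* Mimics programming an IMR; 'lock' plays the role of imr_add_range()'s last arg. */
static void imr_setup(struct imr_slot *imr, uint32_t lo, uint32_t hi, bool lock)
{
	imr->lo = lo;
	imr->hi = hi;
	imr->in_use = true;
	imr->locked = lock;
}

/* Mimics a later attempt (e.g. by a kexec'd kernel) to reclaim the slot. */
static bool imr_teardown(struct imr_slot *imr)
{
	if (imr->locked)
		return false;	/* hardware refuses: locked until reset */
	imr->in_use = false;
	return true;
}

int main(void)
{
	struct imr_slot imr_old = { 0 }, imr_new = { 0 };

	imr_setup(&imr_old, 0x1000000, 0x1400000, true);	/* old behaviour: locked */
	imr_setup(&imr_new, 0x1000000, 0x1400000, false);	/* fixed behaviour: unlocked */

	printf("locked IMR removable:   %s\n", imr_teardown(&imr_old) ? "yes" : "no");
	printf("unlocked IMR removable: %s\n", imr_teardown(&imr_new) ? "yes" : "no");
	return 0;
}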