Commit 9ce04f92 authored by Linus Torvalds


Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  ptrace, x86: fix the usage of ptrace_fork()
  i8237: fix outb() parameter order
  x86: fix math_emu register frame access
  x86: math_emu info cleanup
  x86: include correct %gs in a.out core dump
  x86, vmi: put a missing paravirt_release_pmd in pgd_dtor
  x86: find nr_irqs_gsi with mp_ioapic_routing
  x86: add clflush before monitor for Intel 7400 series
  x86: disable intel_iommu support by default
  x86: don't apply __supported_pte_mask to non-present ptes
  x86: fix grammar in user-visible BIOS warning
  x86/Kconfig.cpu: make Kconfig help readable in the console
  x86, 64-bit: print DMI info in the oops trace
parents b3f2caaa 06eb23b1
@@ -937,6 +937,8 @@ and is between 256 and 4096 characters. It is defined in the file
 	intel_iommu=	[DMAR] Intel IOMMU driver (DMAR) option
+		on
+			Enable intel iommu driver.
 		off
 			Disable intel iommu driver.
 		igfx_off [Default Off]
...
@@ -1802,6 +1802,17 @@ config DMAR
 	  and include PCI device scope covered by these DMA
 	  remapping devices.
 
+config DMAR_DEFAULT_ON
+	def_bool n
+	prompt "Enable DMA Remapping Devices by default"
+	depends on DMAR
+	help
+	  Selecting this option will enable a DMAR device at boot time if
+	  one is found. If this option is not selected, DMAR support can
+	  be enabled by passing intel_iommu=on to the kernel. It is
+	  recommended you say N here while the DMAR code remains
+	  experimental.
+
 config DMAR_GFX_WA
 	def_bool y
 	prompt "Support for Graphics workaround"
...
@@ -167,9 +167,9 @@ config MK7
 config MK8
 	bool "Opteron/Athlon64/Hammer/K8"
 	help
-	  Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
-	  use of some extended instructions, and passes appropriate optimization
-	  flags to GCC.
+	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+	  Enables use of some extended instructions, and passes appropriate
+	  optimization flags to GCC.
 
 config MCRUSOE
 	bool "Crusoe"
@@ -256,9 +256,11 @@ config MPSC
 config MCORE2
 	bool "Core 2/newer Xeon"
 	help
-	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
-	  CPUs. You can distinguish newer from older Xeons by the CPU family
-	  in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo)
+	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+	  53xx) CPUs. You can distinguish newer from older Xeons by the CPU
+	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+	  (not a typo)
 
 config GENERIC_CPU
 	bool "Generic-x86-64"
@@ -320,14 +322,14 @@ config X86_PPRO_FENCE
 	bool "PentiumPro memory ordering errata workaround"
 	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
 	help
-	  Old PentiumPro multiprocessor systems had errata that could cause memory
-	  operations to violate the x86 ordering standard in rare cases. Enabling this
-	  option will attempt to work around some (but not all) occurances of
-	  this problem, at the cost of much heavier spinlock and memory barrier
-	  operations.
+	  Old PentiumPro multiprocessor systems had errata that could cause
+	  memory operations to violate the x86 ordering standard in rare cases.
+	  Enabling this option will attempt to work around some (but not all)
+	  occurances of this problem, at the cost of much heavier spinlock and
+	  memory barrier operations.
 
-	  If unsure, say n here. Even distro kernels should think twice before enabling
-	  this: there are few systems, and an unlikely bug.
+	  If unsure, say n here. Even distro kernels should think twice before
+	  enabling this: there are few systems, and an unlikely bug.
 
 config X86_F00F_BUG
 	def_bool y
...
@@ -23,8 +23,6 @@
  */
 static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 {
-	u16 gs;
-
 /* changed the size calculations - should hopefully work better. lbt */
 	dump->magic = CMAGIC;
 	dump->start_code = 0;
@@ -57,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->regs.ds = (u16)regs->ds;
 	dump->regs.es = (u16)regs->es;
 	dump->regs.fs = (u16)regs->fs;
-	savesegment(gs, gs);
+	savesegment(gs, dump->regs.gs);
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = (u16)regs->cs;
...
@@ -93,6 +93,7 @@
 #define X86_FEATURE_XTOPOLOGY		(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE	(3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC		(3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_CLFLUSH_MONITOR	(3*32+25) /* "" clflush reqd with monitor */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
...
 #ifndef _ASM_X86_MATH_EMU_H
 #define _ASM_X86_MATH_EMU_H
 
+#include <asm/ptrace.h>
+#include <asm/vm86.h>
+
 /* This structure matches the layout of the data saved to the stack
    following a device-not-present interrupt, part of it saved
    automatically by the 80386/80486.
    */
-struct info {
+struct math_emu_info {
 	long ___orig_eip;
-	long ___ebx;
-	long ___ecx;
-	long ___edx;
-	long ___esi;
-	long ___edi;
-	long ___ebp;
-	long ___eax;
-	long ___ds;
-	long ___es;
-	long ___fs;
-	long ___orig_eax;
-	long ___eip;
-	long ___cs;
-	long ___eflags;
-	long ___esp;
-	long ___ss;
-	long ___vm86_es; /* This and the following only in vm86 mode */
-	long ___vm86_ds;
-	long ___vm86_fs;
-	long ___vm86_gs;
+	union {
+		struct pt_regs *regs;
+		struct kernel_vm86_regs *vm86;
+	};
 };
 #endif /* _ASM_X86_MATH_EMU_H */
@@ -60,6 +60,7 @@ extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 				   u32 gsi);
 extern void mp_config_acpi_legacy_irqs(void);
 extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
+extern int acpi_probe_gsi(void);
 #ifdef CONFIG_X86_IO_APIC
 extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 				u32 gsi, int triggering, int polarity);
@@ -71,6 +72,11 @@ mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 	return 0;
 }
 #endif
+#else /* !CONFIG_ACPI: */
+static inline int acpi_probe_gsi(void)
+{
+	return 0;
+}
 #endif /* CONFIG_ACPI */
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
...
@@ -302,16 +302,30 @@ static inline pte_t pte_mkspecial(pte_t pte)
 extern pteval_t __supported_pte_mask;
 
+/*
+ * Mask out unsupported bits in a present pgprot.  Non-present pgprots
+ * can use those bits for other purposes, so leave them be.
+ */
+static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
+{
+	pgprotval_t protval = pgprot_val(pgprot);
+
+	if (protval & _PAGE_PRESENT)
+		protval &= __supported_pte_mask;
+
+	return protval;
+}
+
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
 }
 
 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
 }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -323,7 +337,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	 * the newprot (if present):
 	 */
 	val &= _PAGE_CHG_MASK;
-	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
+	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
 
 	return __pte(val);
 }
@@ -339,7 +353,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
 
-#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
+#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
 static inline int is_new_memtype_allowed(unsigned long flags,
 					 unsigned long new_flags)
...
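Editorial note on the massage_pgprot() hunk above: __supported_pte_mask is applied only when _PAGE_PRESENT is set, because non-present entries reuse the protection bits for software state (swap entries, for instance) and masking them would corrupt that state. A minimal stand-alone sketch of that rule follows; the mask and bit values are illustrative, not the kernel's real layout.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT 0x001ULL

/* Illustrative "hardware supports these bits" mask, not the real one. */
static const uint64_t supported_pte_mask = 0x8000000000000fffULL;

/* Model of massage_pgprot(): trim only present entries to the
 * hardware-supported bits; non-present encodings pass through intact. */
static uint64_t massage_pgprot_model(uint64_t protval)
{
	if (protval & _PAGE_PRESENT)
		return protval & supported_pte_mask;
	return protval;
}

int main(void)
{
	uint64_t swap_like = 0x00000123456000e0ULL; /* present bit clear */
	uint64_t present   = 0x4000000000000163ULL; /* present bit set   */

	printf("non-present %#018llx -> %#018llx (left alone)\n",
	       (unsigned long long)swap_like,
	       (unsigned long long)massage_pgprot_model(swap_like));
	printf("present     %#018llx -> %#018llx (unsupported bit cleared)\n",
	       (unsigned long long)present,
	       (unsigned long long)massage_pgprot_model(present));
	return 0;
}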
@@ -353,7 +353,7 @@ struct i387_soft_struct {
 	u8			no_update;
 	u8			rm;
 	u8			alimit;
-	struct info		*info;
+	struct math_emu_info	*info;
 	u32			entry_eip;
 };
...
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
 dotraplinkage void do_overflow(struct pt_regs *, long);
 dotraplinkage void do_bounds(struct pt_regs *, long);
 dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs *, long);
+dotraplinkage void do_device_not_available(struct pt_regs);
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
 dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
 dotraplinkage void do_segment_not_present(struct pt_regs *, long);
@@ -77,7 +77,7 @@ extern int panic_on_unrecovered_nmi;
 extern int kstack_depth_to_print;
 
 void math_error(void __user *);
-asmlinkage void math_emulate(long);
+void math_emulate(struct math_emu_info *);
 #ifdef CONFIG_X86_32
 unsigned long patch_espfix_desc(unsigned long, unsigned long);
 #else
...
@@ -137,7 +137,7 @@ static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
 	pte_t pte;
 
 	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
-		(pgprot_val(pgprot) & __supported_pte_mask);
+			massage_pgprot(pgprot);
 
 	return pte;
 }
...
@@ -973,6 +973,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 	nr_ioapics++;
 }
 
+int __init acpi_probe_gsi(void)
+{
+	int idx;
+	int gsi;
+	int max_gsi = 0;
+
+	if (acpi_disabled)
+		return 0;
+
+	if (!acpi_ioapic)
+		return 0;
+
+	max_gsi = 0;
+	for (idx = 0; idx < nr_ioapics; idx++) {
+		gsi = mp_ioapic_routing[idx].gsi_end;
+
+		if (gsi > max_gsi)
+			max_gsi = gsi;
+	}
+
+	return max_gsi + 1;
+}
+
 static void assign_to_mp_irq(struct mp_config_intsrc *m,
 				    struct mp_config_intsrc *mp_irq)
 {
...
@@ -291,6 +291,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		ds_init_intel(c);
 	}
 
+	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
+
 #ifdef CONFIG_X86_64
 	if (c->x86 == 15)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
...
@@ -28,10 +28,10 @@ static int i8237A_resume(struct sys_device *dev)
 
 	flags = claim_dma_lock();
 
-	dma_outb(DMA1_RESET_REG, 0);
-	dma_outb(DMA2_RESET_REG, 0);
+	dma_outb(0, DMA1_RESET_REG);
+	dma_outb(0, DMA2_RESET_REG);
 
-	for (i = 0;i < 8;i++) {
+	for (i = 0; i < 8; i++) {
 		set_dma_addr(i, 0x000000);
 		/* DMA count is a bit weird so this is not 0 */
 		set_dma_count(i, 1);
@@ -51,14 +51,14 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
 }
 
 static struct sysdev_class i8237_sysdev_class = {
 	.name		= "i8237",
 	.suspend	= i8237A_suspend,
 	.resume		= i8237A_resume,
 };
 
 static struct sys_device device_i8237A = {
 	.id	= 0,
 	.cls	= &i8237_sysdev_class,
 };
 
 static int __init i8237A_init_sysfs(void)
@@ -68,5 +68,4 @@ static int __init i8237A_init_sysfs(void)
 	error = sysdev_register(&device_i8237A);
 	return error;
 }
-
 device_initcall(i8237A_init_sysfs);
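Editorial note on the i8237 hunk above: the x86 Linux port-I/O helpers take the value first and the port second, so dma_outb(DMA1_RESET_REG, 0) wrote the register number to port 0 instead of clearing the controllers. A stand-alone sketch with a mock dma_outb() follows; the port numbers are illustrative stand-ins for the definitions in <asm/dma.h>.

#include <stdio.h>

/* Illustrative values; the real ones come from <asm/dma.h>. */
#define DMA1_RESET_REG 0x0d
#define DMA2_RESET_REG 0xda

/* Mock of dma_outb(), same argument order as outb(value, port). */
static void dma_outb(unsigned char value, unsigned short port)
{
	printf("write 0x%02x to port 0x%02x\n", value, port);
}

int main(void)
{
	/* The pre-fix order, dma_outb(DMA1_RESET_REG, 0), would have
	 * written 0x0d to port 0 rather than resetting controller 1. */
	dma_outb(0, DMA1_RESET_REG);	/* master clear, DMA controller 1 */
	dma_outb(0, DMA2_RESET_REG);	/* master clear, DMA controller 2 */
	return 0;
}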
@@ -3841,14 +3841,24 @@ int __init io_apic_get_redir_entries (int ioapic)
 void __init probe_nr_irqs_gsi(void)
 {
-	int idx;
 	int nr = 0;
 
-	for (idx = 0; idx < nr_ioapics; idx++)
-		nr += io_apic_get_redir_entries(idx) + 1;
-
-	if (nr > nr_irqs_gsi)
+	nr = acpi_probe_gsi();
+	if (nr > nr_irqs_gsi) {
 		nr_irqs_gsi = nr;
+	} else {
+		/* for acpi=off or acpi is not compiled in */
+		int idx;
+
+		nr = 0;
+		for (idx = 0; idx < nr_ioapics; idx++)
+			nr += io_apic_get_redir_entries(idx) + 1;
+
+		if (nr > nr_irqs_gsi)
+			nr_irqs_gsi = nr;
+	}
+
+	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
 }
 
 /* --------------------------------------------------------------------------
...
@@ -180,6 +180,9 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 		trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
 		if (!need_resched()) {
+			if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+				clflush((void *)&current_thread_info()->flags);
+
 			__monitor((void *)&current_thread_info()->flags, 0, 0);
 			smp_mb();
 			if (!need_resched())
@@ -194,6 +197,9 @@ static void mwait_idle(void)
 	struct power_trace it;
 
 	if (!need_resched()) {
 		trace_power_start(&it, POWER_CSTATE, 1);
+		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+			clflush((void *)&current_thread_info()->flags);
+
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
...
@@ -40,6 +40,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -151,14 +152,18 @@ void __show_regs(struct pt_regs *regs, int all)
 	unsigned long d0, d1, d2, d3, d6, d7;
 	unsigned int fsindex, gsindex;
 	unsigned int ds, cs, es;
+	const char *board;
 
 	printk("\n");
 	print_modules();
-	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
+	board = dmi_get_system_info(DMI_PRODUCT_NAME);
+	if (!board)
+		board = "";
+	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version, board);
 	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
 	printk_address(regs->ip, 1);
 	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
...
@@ -607,7 +607,7 @@ struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
 static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE
-		"%s detected: BIOS may corrupt low RAM, working it around.\n",
+		"%s detected: BIOS may corrupt low RAM, working around it.\n",
 		d->ident);
 
 	e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
...
@@ -896,7 +896,7 @@ asmlinkage void math_state_restore(void)
 EXPORT_SYMBOL_GPL(math_state_restore);
 
 #ifndef CONFIG_MATH_EMULATION
-asmlinkage void math_emulate(long arg)
+void math_emulate(struct math_emu_info *info)
 {
 	printk(KERN_EMERG
 		"math-emulation not enabled and no coprocessor found.\n");
@@ -906,16 +906,19 @@ asmlinkage void math_emulate(long arg)
 }
 #endif /* CONFIG_MATH_EMULATION */
 
-dotraplinkage void __kprobes
-do_device_not_available(struct pt_regs *regs, long error)
+dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
 {
 #ifdef CONFIG_X86_32
 	if (read_cr0() & X86_CR0_EM) {
-		conditional_sti(regs);
-		math_emulate(0);
+		struct math_emu_info info = { };
+
+		conditional_sti(&regs);
+
+		info.regs = &regs;
+		math_emulate(&info);
 	} else {
 		math_state_restore(); /* interrupts still off */
-		conditional_sti(regs);
+		conditional_sti(&regs);
 	}
 #else
 	math_state_restore();
...
@@ -320,6 +320,16 @@ static void vmi_release_pmd(unsigned long pfn)
 	vmi_ops.release_page(pfn, VMI_PAGE_L2);
 }
 
+/*
+ * We use the pgd_free hook for releasing the pgd page:
+ */
+static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;
+
+	vmi_ops.release_page(pfn, VMI_PAGE_L2);
+}
+
 /*
  * Helper macros for MMU update flags.  We can defer updates until a flush
  * or page invalidation only if the update is to the current address space
@@ -762,6 +772,7 @@ static inline int __init activate_vmi(void)
 	if (vmi_ops.release_page) {
 		pv_mmu_ops.release_pte = vmi_release_pte;
 		pv_mmu_ops.release_pmd = vmi_release_pmd;
+		pv_mmu_ops.pgd_free = vmi_pgd_free;
 	}
 
 	/* Set linear is needed in all cases */
...
@@ -131,7 +131,7 @@ u_char emulating = 0;
 static int valid_prefix(u_char *Byte, u_char __user ** fpu_eip,
 			overrides * override);
 
-asmlinkage void math_emulate(long arg)
+void math_emulate(struct math_emu_info *info)
 {
 	u_char FPU_modrm, byte1;
 	unsigned short code;
@@ -161,7 +161,7 @@ asmlinkage void math_emulate(long arg)
 	RE_ENTRANT_CHECK_ON;
 #endif /* RE_ENTRANT_CHECKING */
 
-	SETUP_DATA_AREA(arg);
+	FPU_info = info;
 
 	FPU_ORIG_EIP = FPU_EIP;
@@ -659,7 +659,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
 		}
 	}
 
-void math_abort(struct info *info, unsigned int signal)
+void math_abort(struct math_emu_info *info, unsigned int signal)
 {
 	FPU_EIP = FPU_ORIG_EIP;
 	current->thread.trap_no = 16;
...
@@ -51,8 +51,8 @@ extern void ffreep(void);
 extern void fst_i_(void);
 extern void fstp_i(void);
 /* fpu_entry.c */
-asmlinkage extern void math_emulate(long arg);
-extern void math_abort(struct info *info, unsigned int signal);
+extern void math_emulate(struct math_emu_info *info);
+extern void math_abort(struct math_emu_info *info, unsigned int signal);
 /* fpu_etc.c */
 extern void FPU_etc(void);
 /* fpu_tags.c */
...
@@ -16,10 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
-/* This sets the pointer FPU_info to point to the argument part
-   of the stack frame of math_emulate() */
-#define SETUP_DATA_AREA(arg)	FPU_info = (struct info *) &arg
-
 /* s is always from a cpu register, and the cpu does bounds checking
  * during register load --> no further bounds checks needed */
 #define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
@@ -38,12 +34,12 @@
 #define I387			(current->thread.xstate)
 #define FPU_info		(I387->soft.info)
 
-#define FPU_CS			(*(unsigned short *) &(FPU_info->___cs))
-#define FPU_SS			(*(unsigned short *) &(FPU_info->___ss))
-#define FPU_DS			(*(unsigned short *) &(FPU_info->___ds))
-#define FPU_EAX			(FPU_info->___eax)
-#define FPU_EFLAGS		(FPU_info->___eflags)
-#define FPU_EIP			(FPU_info->___eip)
+#define FPU_CS			(*(unsigned short *) &(FPU_info->regs->cs))
+#define FPU_SS			(*(unsigned short *) &(FPU_info->regs->ss))
+#define FPU_DS			(*(unsigned short *) &(FPU_info->regs->ds))
+#define FPU_EAX			(FPU_info->regs->ax)
+#define FPU_EFLAGS		(FPU_info->regs->flags)
+#define FPU_EIP			(FPU_info->regs->ip)
 #define FPU_ORIG_EIP		(FPU_info->___orig_eip)
 
 #define FPU_lookahead		(I387->soft.lookahead)
...
@@ -29,46 +29,43 @@
 #define FPU_WRITE_BIT 0x10
 
 static int reg_offset[] = {
-	offsetof(struct info, ___eax),
-	offsetof(struct info, ___ecx),
-	offsetof(struct info, ___edx),
-	offsetof(struct info, ___ebx),
-	offsetof(struct info, ___esp),
-	offsetof(struct info, ___ebp),
-	offsetof(struct info, ___esi),
-	offsetof(struct info, ___edi)
+	offsetof(struct pt_regs, ax),
+	offsetof(struct pt_regs, cx),
+	offsetof(struct pt_regs, dx),
+	offsetof(struct pt_regs, bx),
+	offsetof(struct pt_regs, sp),
+	offsetof(struct pt_regs, bp),
+	offsetof(struct pt_regs, si),
+	offsetof(struct pt_regs, di)
 };
 
-#define REG_(x) (*(long *)(reg_offset[(x)]+(u_char *) FPU_info))
+#define REG_(x) (*(long *)(reg_offset[(x)] + (u_char *)FPU_info->regs))
 
 static int reg_offset_vm86[] = {
-	offsetof(struct info, ___cs),
-	offsetof(struct info, ___vm86_ds),
-	offsetof(struct info, ___vm86_es),
-	offsetof(struct info, ___vm86_fs),
-	offsetof(struct info, ___vm86_gs),
-	offsetof(struct info, ___ss),
-	offsetof(struct info, ___vm86_ds)
+	offsetof(struct pt_regs, cs),
+	offsetof(struct kernel_vm86_regs, ds),
+	offsetof(struct kernel_vm86_regs, es),
+	offsetof(struct kernel_vm86_regs, fs),
+	offsetof(struct kernel_vm86_regs, gs),
+	offsetof(struct pt_regs, ss),
+	offsetof(struct kernel_vm86_regs, ds)
 };
 
 #define VM86_REG_(x) (*(unsigned short *) \
-		      (reg_offset_vm86[((unsigned)x)]+(u_char *) FPU_info))
-
-/* This dummy, gs is not saved on the stack. */
-#define ___GS ___ds
+		(reg_offset_vm86[((unsigned)x)] + (u_char *)FPU_info->regs))
 
 static int reg_offset_pm[] = {
-	offsetof(struct info, ___cs),
-	offsetof(struct info, ___ds),
-	offsetof(struct info, ___es),
-	offsetof(struct info, ___fs),
-	offsetof(struct info, ___GS),
-	offsetof(struct info, ___ss),
-	offsetof(struct info, ___ds)
+	offsetof(struct pt_regs, cs),
+	offsetof(struct pt_regs, ds),
+	offsetof(struct pt_regs, es),
+	offsetof(struct pt_regs, fs),
+	offsetof(struct pt_regs, ds),	/* dummy, not saved on stack */
+	offsetof(struct pt_regs, ss),
+	offsetof(struct pt_regs, ds)
 };
 
 #define PM_REG_(x) (*(unsigned short *) \
-		    (reg_offset_pm[((unsigned)x)]+(u_char *) FPU_info))
+		(reg_offset_pm[((unsigned)x)] + (u_char *)FPU_info->regs))
 
 /* Decode the SIB byte. This function assumes mod != 0 */
 static int sib(int mod, unsigned long *fpu_eip)
@@ -349,34 +346,34 @@ void __user *FPU_get_address_16(u_char FPU_modrm, unsigned long *fpu_eip,
 	}
 	switch (rm) {
 	case 0:
-		address += FPU_info->___ebx + FPU_info->___esi;
+		address += FPU_info->regs->bx + FPU_info->regs->si;
 		break;
 	case 1:
-		address += FPU_info->___ebx + FPU_info->___edi;
+		address += FPU_info->regs->bx + FPU_info->regs->di;
 		break;
 	case 2:
-		address += FPU_info->___ebp + FPU_info->___esi;
+		address += FPU_info->regs->bp + FPU_info->regs->si;
 		if (addr_modes.override.segment == PREFIX_DEFAULT)
 			addr_modes.override.segment = PREFIX_SS_;
 		break;
 	case 3:
-		address += FPU_info->___ebp + FPU_info->___edi;
+		address += FPU_info->regs->bp + FPU_info->regs->di;
 		if (addr_modes.override.segment == PREFIX_DEFAULT)
 			addr_modes.override.segment = PREFIX_SS_;
 		break;
 	case 4:
-		address += FPU_info->___esi;
+		address += FPU_info->regs->si;
 		break;
 	case 5:
-		address += FPU_info->___edi;
+		address += FPU_info->regs->di;
 		break;
 	case 6:
-		address += FPU_info->___ebp;
+		address += FPU_info->regs->bp;
 		if (addr_modes.override.segment == PREFIX_DEFAULT)
 			addr_modes.override.segment = PREFIX_SS_;
 		break;
 	case 7:
-		address += FPU_info->___ebx;
+		address += FPU_info->regs->bx;
 		break;
 	}
...
@@ -268,7 +268,12 @@ static long list_size;
 
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
-int dmar_disabled;
+#ifdef CONFIG_DMAR_DEFAULT_ON
+int dmar_disabled = 0;
+#else
+int dmar_disabled = 1;
+#endif /*CONFIG_DMAR_DEFAULT_ON*/
+
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -284,9 +289,12 @@ static int __init intel_iommu_setup(char *str)
 	if (!str)
 		return -EINVAL;
 	while (*str) {
-		if (!strncmp(str, "off", 3)) {
+		if (!strncmp(str, "on", 2)) {
+			dmar_disabled = 0;
+			printk(KERN_INFO "Intel-IOMMU: enabled\n");
+		} else if (!strncmp(str, "off", 3)) {
 			dmar_disabled = 1;
-			printk(KERN_INFO"Intel-IOMMU: disabled\n");
+			printk(KERN_INFO "Intel-IOMMU: disabled\n");
 		} else if (!strncmp(str, "igfx_off", 8)) {
 			dmar_map_gfx = 0;
 			printk(KERN_INFO
...
@@ -1094,7 +1094,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
-	if (unlikely(ptrace_reparented(current)))
+	if (unlikely(current->ptrace))
 		ptrace_fork(p, clone_flags);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
...