Commit 19e0d5f1 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Fixes from all around the place:

   - hyper-V 32-bit PAE guest kernel fix
   - two IRQ allocation fixes on certain x86 boards
   - intel-mid boot crash fix
   - intel-quark quirk
   - /proc/interrupts duplicate irq chip name fix
   - cma boot crash fix
   - syscall audit fix
   - boot crash fix with certain TSC configurations (seen on Qemu)
   - smpboot.c build warning fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, pageattr: Prevent overflow in slow_virt_to_phys() for X86_PAE
  ACPI, irq, x86: Return IRQ instead of GSI in mp_register_gsi()
  x86, intel-mid: Create IRQs for APB timers and RTC timers
  x86: Don't enable F00F workaround on Intel Quark processors
  x86/irq: Fix XT-PIC-XT-PIC in /proc/interrupts
  x86, cma: Reserve DMA contiguous area after initmem_init()
  i386/audit: stop scribbling on the stack frame
  x86, apic: Handle a bad TSC more gracefully
  x86: ACPI: Do not translate GSI number if IOAPIC is disabled
  x86/smpboot: Move data structure to its primary usage scope
parents f5fa3630 d1cd1210
@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
 
 	/* Don't set up the ACPI SCI because it's already set up */
 	if (acpi_gbl_FADT.sci_interrupt == gsi)
-		return gsi;
+		return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
 
 	trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
 	polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
-	int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+	int irq;
 
-	if (irq >= 0) {
+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+		*irqp = gsi;
+	} else {
+		irq = mp_map_gsi_to_irq(gsi,
+					IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+		if (irq < 0)
+			return -1;
 		*irqp = irq;
-		return 0;
 	}
-
-	return -1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
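When the IOAPIC is disabled the kernel runs in legacy PIC mode, where a GSI and its legacy IRQ number are the same thing, so translating through the (absent) IOAPIC domain would fail; the reworked acpi_gsi_to_irq() short-circuits that case. Below is a minimal sketch of how a caller typically consumes the result; the handler and device names (my_dev_isr, "my_dev") are invented for illustration and are not part of the patch.

    #include <linux/acpi.h>
    #include <linux/errno.h>
    #include <linux/interrupt.h>

    /* Illustrative only: map a firmware-provided GSI to a Linux IRQ
     * before requesting it.  In PIC mode the returned IRQ equals the
     * GSI; with an IOAPIC it is whatever the IOAPIC domain allocated. */
    static irqreturn_t my_dev_isr(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    static int my_dev_setup_irq(u32 gsi, void *data)
    {
        unsigned int irq;

        if (acpi_gsi_to_irq(gsi, &irq))     /* non-zero: no mapping */
            return -ENODEV;

        return request_irq(irq, my_dev_isr, 0, "my_dev", data);
    }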
@@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
 	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
 
-	/* APB timer irqs are set up as mp_irqs, timer is edge type */
-	__irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
 }
 
 /* Should be called with per cpu */
@@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
 	unsigned int value, queued;
 	int i, j, acked = 0;
 	unsigned long long tsc = 0, ntsc;
-	long long max_loops = cpu_khz;
+	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
 		rdtscll(tsc);
@@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
 			break;
 		}
 		if (queued) {
-			if (cpu_has_tsc) {
+			if (cpu_has_tsc && cpu_khz) {
 				rdtscll(ntsc);
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
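setup_local_APIC() bounds its interrupt-acknowledge loop by a TSC delta derived from cpu_khz; when TSC calibration fails (cpu_khz == 0, the configuration flagged in the pull summary as seen under Qemu) the old bound started at zero. The fix falls back to a fixed iteration budget. A rough userspace sketch of that fallback pattern follows; calibrated_khz and drain_pending_work are invented stand-ins, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    #define FALLBACK_LOOPS 1000000LL

    static bool drain_pending_work(void)
    {
        static int pending = 3;     /* pretend a few items are queued */
        return --pending > 0;
    }

    int main(void)
    {
        long long calibrated_khz = 0;   /* e.g. TSC calibration failed */
        long long max_loops = calibrated_khz ? calibrated_khz : FALLBACK_LOOPS;

        /* Keep draining until done or the loop budget runs out. */
        while (drain_pending_work() && --max_loops > 0)
            ;

        printf("stopped with %lld loop budget left\n", max_loops);
        return 0;
    }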
@@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_F00F_BUG
 	/*
-	 * All current models of Pentium and Pentium with MMX technology CPUs
+	 * All models of Pentium and Pentium with MMX technology CPUs
 	 * have the F0 0F bug, which lets nonprivileged users lock up the
 	 * system. Announce that the fault handler will be checking for it.
+	 * The Quark is also family 5, but does not have the same bug.
 	 */
 	clear_cpu_bug(c, X86_BUG_F00F);
-	if (!paravirt_enabled() && c->x86 == 5) {
+	if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
 		static int f00f_workaround_enabled;
 
 		set_cpu_bug(c, X86_BUG_F00F);
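Intel Quark SoCs report CPU family 5, the same family as the original Pentium, but do not suffer from the F00F erratum, so the workaround is now keyed on family 5 with model below 9. The standalone sketch below repeats that test against CPUID leaf 1; it ignores the extended family/model fields (fine for family-5 parts), and has_f00f_bug is an invented helper, not a kernel function.

    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only: mirrors the family-5, model < 9 test from the patch. */
    static bool has_f00f_bug(unsigned int family, unsigned int model)
    {
        return family == 5 && model < 9;
    }

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, family, model;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        family = (eax >> 8) & 0xf;  /* base family */
        model  = (eax >> 4) & 0xf;  /* base model */

        printf("family %u model %u: F00F workaround %s\n", family, model,
               has_f00f_bug(family, model) ? "needed" : "not needed");
        return 0;
    }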
@@ -447,15 +447,14 @@ sysenter_exit:
 
 sysenter_audit:
 	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
 	jnz syscall_trace_entry
-	addl $4,%esp
-	CFI_ADJUST_CFA_OFFSET -4
-	movl %esi,4(%esp)		/* 5th arg: 4th syscall arg */
-	movl %edx,(%esp)		/* 4th arg: 3rd syscall arg */
-	/* %ecx already in %ecx		   3rd arg: 2nd syscall arg */
-	movl %ebx,%edx			/* 2nd arg: 1st syscall arg */
-	/* %eax already in %eax		   1st arg: syscall number */
+	/* movl PT_EAX(%esp), %eax	already set, syscall number: 1st arg to audit */
+	movl PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
+	/* movl PT_ECX(%esp), %ecx	already set, a1: 3nd arg to audit */
+	pushl_cfi PT_ESI(%esp)		/* a3: 5th arg */
+	pushl_cfi PT_EDX+4(%esp)	/* a2: 4th arg */
 	call __audit_syscall_entry
-	pushl_cfi %ebx
+	popl_cfi %ecx			/* get that remapped edx off the stack */
+	popl_cfi %ecx			/* get that remapped esi off the stack */
 	movl PT_EAX(%esp),%eax		/* reload syscall number */
 	jmp sysenter_do_call
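The old sequence rewrote the saved user registers on the kernel stack to build the argument list; the new one reads the saved PT_* slots and pushes copies, so the frame stays intact. The register/stack split follows the 32-bit kernel's regparm(3) calling convention: the first three arguments of __audit_syscall_entry travel in %eax/%edx/%ecx and the remaining two go on the stack. The prototype below merely restates that mapping in C for an x86-32 build (e.g. -m32); it is an illustration with an invented name, not the kernel's declaration.

    /* Illustrative prototype only.  With regparm(3):
     *   arg1  syscall number (saved %eax)  -> %eax
     *   arg2  a0 (saved %ebx)              -> %edx
     *   arg3  a1 (saved %ecx)              -> %ecx
     *   arg4  a2 (saved %edx)              -> pushed on the stack
     *   arg5  a3 (saved %esi)              -> pushed on the stack
     */
    __attribute__((regparm(3)))
    void example_audit_entry(unsigned long major, unsigned long a0,
                             unsigned long a1, unsigned long a2,
                             unsigned long a3);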
@@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
 	io_apic_irqs &= ~(1<<irq);
-	irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
-				      i8259A_chip.name);
+	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
@@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector)
 void __init init_ISA_irqs(void)
 {
 	struct irq_chip *chip = legacy_pic->chip;
-	const char *name = chip->name;
 	int i;
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
@@ -79,7 +78,7 @@ void __init init_ISA_irqs(void)
 	legacy_pic->init(0);
 
 	for (i = 0; i < nr_legacy_irqs(); i++)
-		irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
+		irq_set_chip_and_handler(i, chip, handle_level_irq);
 }
 
 void __init init_IRQ(void)
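Both i8259.c and irqinit.c passed chip->name as the per-descriptor handler name, and /proc/interrupts prints the chip name followed by "-<descriptor name>" when the latter is set, which produced lines such as "XT-PIC-XT-PIC". Dropping the explicit name leaves the descriptor name unset, so only the chip name is shown. A rough userspace model of that formatting follows; print_irq_label is invented and the field widths are simplified.

    #include <stdio.h>

    /* Rough model of how /proc/interrupts composes the label: the chip
     * name is always printed, and the per-descriptor name, if any, is
     * appended after a '-'. */
    static void print_irq_label(const char *chip_name, const char *desc_name)
    {
        printf("%8s", chip_name);
        if (desc_name)
            printf("-%-8s", desc_name);
        printf("\n");
    }

    int main(void)
    {
        print_irq_label("XT-PIC", "XT-PIC");    /* old behaviour: duplicated */
        print_irq_label("XT-PIC", NULL);        /* after the fix */
        return 0;
    }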
@@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p)
 	setup_real_mode();
 
 	memblock_set_current_limit(get_max_mapped());
-	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p)
 	early_acpi_boot_init();
 
 	initmem_init();
+	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
 	/*
 	 * Reserve memory for crash kernel after SRAT is parsed so that it
@@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static DEFINE_PER_CPU(struct completion, die_complete);
-
 atomic_t init_deasserted;
 
 /*
@@ -1318,6 +1316,8 @@ void cpu_disable_common(void)
 	fixup_irqs();
 }
 
+static DEFINE_PER_CPU(struct completion, die_complete);
+
 int native_cpu_disable(void)
 {
 	int ret;
@@ -1166,14 +1166,17 @@ void __init tsc_init(void)
 
 	x86_init.timers.tsc_pre_init();
 
-	if (!cpu_has_tsc)
+	if (!cpu_has_tsc) {
+		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
+	}
 
 	tsc_khz = x86_platform.calibrate_tsc();
 	cpu_khz = tsc_khz;
 
 	if (!tsc_khz) {
 		mark_tsc_unstable("could not calculate TSC khz");
+		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
 	}
@@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
 	psize = page_level_size(level);
 	pmask = page_level_mask(level);
 	offset = virt_addr & ~pmask;
-	phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+	phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 	return (phys_addr | offset);
 }
 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
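On 32-bit PAE, pte_pfn() yields an unsigned long (32 bits), so shifting it by PAGE_SHIFT before widening truncates any physical address at or above 4 GiB; that is the overflow flagged in the summary above for Hyper-V 32-bit PAE guests. Casting to phys_addr_t (a 64-bit type under PAE) first makes the shift happen in 64 bits. A self-contained demonstration of the truncation, using uint32_t to stand in for the 32-bit kernel's unsigned long and a made-up PFN:

    #include <inttypes.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* A PFN just above the 4 GiB boundary: physical 0x100020000. */
        uint32_t pfn = 0x100020;

        /* Shift done in 32 bits first: the high bits are lost. */
        uint64_t truncated = (uint64_t)(pfn << PAGE_SHIFT);

        /* Widen first (what the patch does with phys_addr_t), then shift. */
        uint64_t correct = (uint64_t)pfn << PAGE_SHIFT;

        printf("truncated: 0x%" PRIx64 "\n", truncated);   /* 0x20000 */
        printf("correct:   0x%" PRIx64 "\n", correct);      /* 0x100020000 */
        return 0;
    }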
@@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
 		mp_irq.dstapic = MP_APIC_ALL;
 		mp_irq.dstirq = pentry->irq;
 		mp_save_irq(&mp_irq);
+		mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
 	}
 
 	return 0;
@@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
 		mp_irq.dstapic = MP_APIC_ALL;
 		mp_irq.dstirq = pentry->irq;
 		mp_save_irq(&mp_irq);
+		mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
 	}
 	return 0;
 }