Commit bba072df authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A couple of fixes and updates related to x86:

   - Fix the W+X check regression on XEN

   - The real fix for the low identity map trainwreck

   - Probe legacy PIC early instead of unconditionally allocating legacy
     irqs

   - Add cpu verification to long mode entry

   - Adjust the cache topology for AMD Fam17h systems

   - Let Merrifield use the TSC across S3"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Call verify_cpu() after having entered long mode too
  x86/setup: Fix low identity map for >= 2GB kernel range
  x86/mm: Skip the hypervisor range when walking PGD
  x86/AMD: Fix last level cache topology for AMD Fam17h systems
  x86/irq: Probe for PIC presence before allocating descs for legacy IRQs
  x86/cpu/intel: Enable X86_FEATURE_NONSTOP_TSC_S3 for Merrifield
parents 511601bd 04633df0
...@@ -60,6 +60,7 @@ struct legacy_pic { ...@@ -60,6 +60,7 @@ struct legacy_pic {
void (*mask_all)(void); void (*mask_all)(void);
void (*restore_mask)(void); void (*restore_mask)(void);
void (*init)(int auto_eoi); void (*init)(int auto_eoi);
int (*probe)(void);
int (*irq_pending)(unsigned int irq); int (*irq_pending)(unsigned int irq);
void (*make_irq)(unsigned int irq); void (*make_irq)(unsigned int irq);
}; };
......
...@@ -361,7 +361,11 @@ int __init arch_probe_nr_irqs(void) ...@@ -361,7 +361,11 @@ int __init arch_probe_nr_irqs(void)
if (nr < nr_irqs) if (nr < nr_irqs)
nr_irqs = nr; nr_irqs = nr;
return nr_legacy_irqs(); /*
* We don't know if PIC is present at this point so we need to do
* probe() to get the right number of legacy IRQs.
*/
return legacy_pic->probe();
} }
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
......
...@@ -352,6 +352,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) ...@@ -352,6 +352,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unsigned bits; unsigned bits;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
unsigned int socket_id, core_complex_id;
bits = c->x86_coreid_bits; bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */ /* Low order bits define the core id (index of core in socket) */
...@@ -361,6 +362,18 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) ...@@ -361,6 +362,18 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
/* use socket ID also for last level cache */ /* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
amd_get_topology(c); amd_get_topology(c);
/*
* Fix percpu cpu_llc_id here as LLC topology is different
* for Fam17h systems.
*/
if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
return;
socket_id = (c->apicid >> bits) - 1;
core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
#endif #endif
} }
......
...@@ -97,6 +97,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) ...@@ -97,6 +97,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
switch (c->x86_model) { switch (c->x86_model) {
case 0x27: /* Penwell */ case 0x27: /* Penwell */
case 0x35: /* Cloverview */ case 0x35: /* Cloverview */
case 0x4a: /* Merrifield */
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
break; break;
default: default:
......
...@@ -65,6 +65,9 @@ startup_64: ...@@ -65,6 +65,9 @@ startup_64:
* tables and then reload them. * tables and then reload them.
*/ */
/* Sanitize CPU configuration */
call verify_cpu
/* /*
* Compute the delta between the address I am compiled to run at and the * Compute the delta between the address I am compiled to run at and the
* address I am actually running at. * address I am actually running at.
...@@ -174,6 +177,9 @@ ENTRY(secondary_startup_64) ...@@ -174,6 +177,9 @@ ENTRY(secondary_startup_64)
* after the boot processor executes this code. * after the boot processor executes this code.
*/ */
/* Sanitize CPU configuration */
call verify_cpu
movq $(init_level4_pgt - __START_KERNEL_map), %rax movq $(init_level4_pgt - __START_KERNEL_map), %rax
1: 1:
...@@ -288,6 +294,8 @@ ENTRY(secondary_startup_64) ...@@ -288,6 +294,8 @@ ENTRY(secondary_startup_64)
pushq %rax # target address in negative space pushq %rax # target address in negative space
lretq lretq
#include "verify_cpu.S"
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
/* /*
* Boot CPU0 entry point. It's called from play_dead(). Everything has been set * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
......
...@@ -295,16 +295,11 @@ static void unmask_8259A(void) ...@@ -295,16 +295,11 @@ static void unmask_8259A(void)
raw_spin_unlock_irqrestore(&i8259A_lock, flags); raw_spin_unlock_irqrestore(&i8259A_lock, flags);
} }
static void init_8259A(int auto_eoi) static int probe_8259A(void)
{ {
unsigned long flags; unsigned long flags;
unsigned char probe_val = ~(1 << PIC_CASCADE_IR); unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
unsigned char new_val; unsigned char new_val;
i8259A_auto_eoi = auto_eoi;
raw_spin_lock_irqsave(&i8259A_lock, flags);
/* /*
* Check to see if we have a PIC. * Check to see if we have a PIC.
* Mask all except the cascade and read * Mask all except the cascade and read
...@@ -312,16 +307,28 @@ static void init_8259A(int auto_eoi) ...@@ -312,16 +307,28 @@ static void init_8259A(int auto_eoi)
* have a PIC, we will read 0xff as opposed to the * have a PIC, we will read 0xff as opposed to the
* value we wrote. * value we wrote.
*/ */
raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
outb(probe_val, PIC_MASTER_IMR); outb(probe_val, PIC_MASTER_IMR);
new_val = inb(PIC_MASTER_IMR); new_val = inb(PIC_MASTER_IMR);
if (new_val != probe_val) { if (new_val != probe_val) {
printk(KERN_INFO "Using NULL legacy PIC\n"); printk(KERN_INFO "Using NULL legacy PIC\n");
legacy_pic = &null_legacy_pic; legacy_pic = &null_legacy_pic;
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
} }
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return nr_legacy_irqs();
}
static void init_8259A(int auto_eoi)
{
unsigned long flags;
i8259A_auto_eoi = auto_eoi;
raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
/* /*
...@@ -379,6 +386,10 @@ static int legacy_pic_irq_pending_noop(unsigned int irq) ...@@ -379,6 +386,10 @@ static int legacy_pic_irq_pending_noop(unsigned int irq)
{ {
return 0; return 0;
} }
/*
 * probe() callback for the null legacy PIC: no hardware to detect,
 * so report zero legacy IRQs.
 */
static int legacy_pic_probe(void)
{
	return 0;
}
struct legacy_pic null_legacy_pic = { struct legacy_pic null_legacy_pic = {
.nr_legacy_irqs = 0, .nr_legacy_irqs = 0,
...@@ -388,6 +399,7 @@ struct legacy_pic null_legacy_pic = { ...@@ -388,6 +399,7 @@ struct legacy_pic null_legacy_pic = {
.mask_all = legacy_pic_noop, .mask_all = legacy_pic_noop,
.restore_mask = legacy_pic_noop, .restore_mask = legacy_pic_noop,
.init = legacy_pic_int_noop, .init = legacy_pic_int_noop,
.probe = legacy_pic_probe,
.irq_pending = legacy_pic_irq_pending_noop, .irq_pending = legacy_pic_irq_pending_noop,
.make_irq = legacy_pic_uint_noop, .make_irq = legacy_pic_uint_noop,
}; };
...@@ -400,6 +412,7 @@ struct legacy_pic default_legacy_pic = { ...@@ -400,6 +412,7 @@ struct legacy_pic default_legacy_pic = {
.mask_all = mask_8259A, .mask_all = mask_8259A,
.restore_mask = unmask_8259A, .restore_mask = unmask_8259A,
.init = init_8259A, .init = init_8259A,
.probe = probe_8259A,
.irq_pending = i8259A_irq_pending, .irq_pending = i8259A_irq_pending,
.make_irq = make_8259A_irq, .make_irq = make_8259A_irq,
}; };
......
...@@ -1188,7 +1188,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -1188,7 +1188,7 @@ void __init setup_arch(char **cmdline_p)
*/ */
clone_pgd_range(initial_page_table, clone_pgd_range(initial_page_table,
swapper_pg_dir + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS); min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif #endif
tboot_probe(); tboot_probe();
......
...@@ -34,10 +34,11 @@ ...@@ -34,10 +34,11 @@
#include <asm/msr-index.h> #include <asm/msr-index.h>
verify_cpu: verify_cpu:
pushfl # Save caller passed flags pushf # Save caller passed flags
pushl $0 # Kill any dangerous flags push $0 # Kill any dangerous flags
popfl popf
#ifndef __x86_64__
pushfl # standard way to check for cpuid pushfl # standard way to check for cpuid
popl %eax popl %eax
movl %eax,%ebx movl %eax,%ebx
...@@ -48,6 +49,7 @@ verify_cpu: ...@@ -48,6 +49,7 @@ verify_cpu:
popl %eax popl %eax
cmpl %eax,%ebx cmpl %eax,%ebx
jz verify_cpu_no_longmode # cpu has no cpuid jz verify_cpu_no_longmode # cpu has no cpuid
#endif
movl $0x0,%eax # See if cpuid 1 is implemented movl $0x0,%eax # See if cpuid 1 is implemented
cpuid cpuid
...@@ -130,10 +132,10 @@ verify_cpu_sse_test: ...@@ -130,10 +132,10 @@ verify_cpu_sse_test:
jmp verify_cpu_sse_test # try again jmp verify_cpu_sse_test # try again
verify_cpu_no_longmode: verify_cpu_no_longmode:
popfl # Restore caller passed flags popf # Restore caller passed flags
movl $1,%eax movl $1,%eax
ret ret
verify_cpu_sse_ok: verify_cpu_sse_ok:
popfl # Restore caller passed flags popf # Restore caller passed flags
xorl %eax, %eax xorl %eax, %eax
ret ret
...@@ -358,6 +358,21 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr, ...@@ -358,6 +358,21 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
#define pgd_none(a) pud_none(__pud(pgd_val(a))) #define pgd_none(a) pud_none(__pud(pgd_val(a)))
#endif #endif
#ifdef CONFIG_X86_64
/*
 * Return true when PGD slot @idx falls inside the address range that is
 * reserved for the hypervisor on a paravirtualized 64-bit kernel, so the
 * page-table walker can skip it (per the merge log, dumping this range
 * broke the W+X check under Xen).
 */
static inline bool is_hypervisor_range(int idx)
{
/*
 * ffff800000000000 - ffff87ffffffffff is reserved for
 * the hypervisor.
 */
/* The reserved hole is the 16 PGD slots immediately below __PAGE_OFFSET. */
return paravirt_enabled() &&
(idx >= pgd_index(__PAGE_OFFSET) - 16) &&
(idx < pgd_index(__PAGE_OFFSET));
}
#else
/* 32-bit (or no CONFIG_X86_64): no hypervisor hole exists to skip. */
static inline bool is_hypervisor_range(int idx) { return false; }
#endif
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
bool checkwx) bool checkwx)
{ {
...@@ -381,7 +396,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, ...@@ -381,7 +396,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
for (i = 0; i < PTRS_PER_PGD; i++) { for (i = 0; i < PTRS_PER_PGD; i++) {
st.current_address = normalize_addr(i * PGD_LEVEL_MULT); st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
if (!pgd_none(*start)) { if (!pgd_none(*start) && !is_hypervisor_range(i)) {
if (pgd_large(*start) || !pgd_present(*start)) { if (pgd_large(*start) || !pgd_present(*start)) {
prot = pgd_flags(*start); prot = pgd_flags(*start);
note_page(m, &st, __pgprot(prot), 1); note_page(m, &st, __pgprot(prot), 1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment