Commit 81340006 authored by Ingo Molnar

Merge branch 'x86/urgent' into x86/mce3

Conflicts:
	arch/x86/kernel/cpu/mcheck/mce_intel.c

Merge reason: pick up an urgent-branch MCE fix.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents 1bf7b31e fe955e5c
@@ -13,7 +13,7 @@
  * touching registers they shouldn't be.
  */
-	.code16
+	.code16gcc
 	.text
 	.globl	intcall
 	.type	intcall, @function
...
@@ -29,9 +29,11 @@ extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
 extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_shutdown(void);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
+static inline void amd_iommu_shutdown(void) { }
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
@@ -257,7 +257,7 @@ typedef struct {
 /**
  * atomic64_read - read atomic64 variable
- * @v: pointer of type atomic64_t
+ * @ptr: pointer of type atomic64_t
  *
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
@@ -294,7 +294,6 @@ atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
  * atomic64_xchg - xchg atomic64 variable
  * @ptr: pointer to type atomic64_t
  * @new_val: value to assign
- * @old_val: old value that was there
  *
  * Atomically xchgs the value of @ptr to @new_val and returns
  * the old value.
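For reference, a minimal usage sketch of the documented call on 32-bit x86 (the counter and helper names below are hypothetical; atomic64_set/atomic64_xchg are the API being documented):

	static atomic64_t total_bytes;

	static void counter_init(void)
	{
		atomic64_set(&total_bytes, 0);
	}

	static unsigned long long sample_and_reset(void)
	{
		/* atomically install 0 and hand back the previous value */
		return atomic64_xchg(&total_bytes, 0);
	}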
...
@@ -434,6 +434,16 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
 	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
 }
 
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+{
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+	INC_STATS_COUNTER(domain_flush_single);
+
+	iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+}
+
 /*
  * This function is used to flush the IO/TLB for a given protection domain
  * on every IOMMU in the system
@@ -1078,7 +1088,13 @@ static void attach_device(struct amd_iommu *iommu,
 	amd_iommu_pd_table[devid] = domain;
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
+	/*
+	 * We might boot into a crash-kernel here. The crashed kernel
+	 * left the caches in the IOMMU dirty. So we have to flush
+	 * here to evict all dirty stuff.
+	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
+	iommu_flush_tlb_pde(iommu, domain->id);
 }
 
 /*
...
@@ -260,6 +260,14 @@ static void iommu_enable(struct amd_iommu *iommu)
 
 static void iommu_disable(struct amd_iommu *iommu)
 {
+	/* Disable command buffer */
+	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+	/* Disable event logging and event interrupts */
+	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+	/* Disable IOMMU hardware itself */
 	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 }
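For orientation, iommu_feature_disable() is a read-modify-write of the IOMMU's MMIO control register; a sketch of its likely shape in this tree (the 32-bit register access is an assumption):

	static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
	{
		u32 ctrl;

		ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
		ctrl &= ~(1 << bit);	/* clear the feature's enable bit */
		writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
	}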
@@ -478,6 +486,10 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
 	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 }
@@ -1042,6 +1054,7 @@ static void enable_iommus(void)
 	struct amd_iommu *iommu;
 
 	for_each_iommu(iommu) {
+		iommu_disable(iommu);
 		iommu_set_device_table(iommu);
 		iommu_enable_command_buffer(iommu);
 		iommu_enable_event_buffer(iommu);
@@ -1066,12 +1079,6 @@ static void disable_iommus(void)
 
 static int amd_iommu_resume(struct sys_device *dev)
 {
-	/*
-	 * Disable IOMMUs before reprogramming the hardware registers.
-	 * IOMMU is still enabled from the resume kernel.
-	 */
-	disable_iommus();
-
 	/* re-load the hardware */
 	enable_iommus();
 
@@ -1079,8 +1086,8 @@ static int amd_iommu_resume(struct sys_device *dev)
 	 * we have to flush after the IOMMUs are enabled because a
 	 * disabled IOMMU will never execute the commands we send
 	 */
-	amd_iommu_flush_all_domains();
 	amd_iommu_flush_all_devices();
+	amd_iommu_flush_all_domains();
 
 	return 0;
 }
@@ -1273,6 +1280,11 @@ int __init amd_iommu_init(void)
 	goto out;
 }
 
+void amd_iommu_shutdown(void)
+{
+	disable_iommus();
+}
+
 /****************************************************************************
  *
  * Early detect code. This code runs at IOMMU detection time in the DMA
...
@@ -462,7 +462,8 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 static void
 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
-	union entry_union eu;
+	union entry_union eu = {{0, 0}};
+
 	eu.entry = e;
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
@@ -3567,7 +3568,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 #endif /* CONFIG_SMP */
 
-struct irq_chip dmar_msi_type = {
+static struct irq_chip dmar_msi_type = {
 	.name		= "DMAR_MSI",
 	.unmask		= dmar_msi_unmask,
 	.mask		= dmar_msi_mask,
...
@@ -853,6 +853,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 	numa_add_cpu(smp_processor_id());
 #endif
+
+	/* Cap the iomem address space to what is addressable on all CPUs */
+	iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1;
 }
 
 #ifdef CONFIG_X86_64
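Worked example: on a CPU reporting c->x86_phys_bits == 36, the mask is (1ULL << 36) - 1 == 0xfffffffff, so iomem_resource.end is clamped to just below the 64 GB physical-address boundary that every such CPU can actually address.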
...
@@ -1249,7 +1249,7 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 		 * Various K7s with broken bank 0 around. Always disable
 		 * by default.
 		 */
-		if (c->x86 == 6)
+		if (c->x86 == 6 && banks > 0)
 			bank[0] = 0;
 	}
...
@@ -716,11 +716,15 @@ static void probe_nmi_watchdog(void)
 		wd_ops = &k7_wd_ops;
 		break;
 	case X86_VENDOR_INTEL:
-		/*
-		 * Work around Core Duo (Yonah) errata AE49 where perfctr1
-		 * doesn't have a working enable bit.
+		/* Work around where perfctr1 doesn't have a working enable
+		 * bit as described in the following errata:
+		 * AE49 Core Duo and Intel Core Solo 65 nm
+		 * AN49 Intel Pentium Dual-Core
+		 * AF49 Dual-Core Intel Xeon Processor LV
 		 */
-		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) ||
+		    ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 &&
+		    boot_cpu_data.x86_mask == 4))) {
 			intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
 			intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
 		}
...
@@ -27,6 +27,7 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
+#include <asm/iommu.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
@@ -103,5 +104,10 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
 	hpet_disable();
 #endif
+
+#ifdef CONFIG_X86_64
+	pci_iommu_shutdown();
+#endif
+
 	crash_save_cpu(regs, safe_smp_processor_id());
 }
@@ -240,10 +240,35 @@ static void __init do_add_efi_memmap(void)
 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 		int e820_type;
 
-		if (md->attribute & EFI_MEMORY_WB)
-			e820_type = E820_RAM;
-		else
-			e820_type = E820_RESERVED;
+		switch (md->type) {
+		case EFI_LOADER_CODE:
+		case EFI_LOADER_DATA:
+		case EFI_BOOT_SERVICES_CODE:
+		case EFI_BOOT_SERVICES_DATA:
+		case EFI_CONVENTIONAL_MEMORY:
+			if (md->attribute & EFI_MEMORY_WB)
+				e820_type = E820_RAM;
+			else
+				e820_type = E820_RESERVED;
+			break;
+		case EFI_ACPI_RECLAIM_MEMORY:
+			e820_type = E820_ACPI;
+			break;
+		case EFI_ACPI_MEMORY_NVS:
+			e820_type = E820_NVS;
+			break;
+		case EFI_UNUSABLE_MEMORY:
+			e820_type = E820_UNUSABLE;
+			break;
+		default:
+			/*
+			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
+			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
+			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
+			 */
+			e820_type = E820_RESERVED;
+			break;
+		}
 		e820_add_region(start, size, e820_type);
 	}
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
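For example, an EFI_ACPI_RECLAIM_MEMORY region that happens to be write-back cacheable was previously entered into the e820 map as E820_RAM; with this change it becomes E820_ACPI, and only loader, boot-services, and conventional memory can still be promoted to E820_RAM.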
...
@@ -510,7 +510,8 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {
 	if (request_irq(dev->irq, hpet_interrupt_handler,
-			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
+			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+			dev->name, dev))
 		return -1;
 
 	disable_irq(dev->irq);
...
@@ -290,6 +290,8 @@ static int __init pci_iommu_init(void)
 void pci_iommu_shutdown(void)
 {
 	gart_iommu_shutdown();
+
+	amd_iommu_shutdown();
 }
 
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
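Taken together with the crash_shutdown hook above, the kdump path is now native_machine_crash_shutdown() -> pci_iommu_shutdown() -> amd_iommu_shutdown() -> disable_iommus(), so the secondary kernel inherits IOMMUs that are switched off rather than left live with stale translation state.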
...
@@ -951,11 +951,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	tsk = current;
 	mm = tsk->mm;
 
-	prefetchw(&mm->mmap_sem);
-
 	/* Get the faulting address: */
 	address = read_cr2();
 
+	prefetchw(&mm->mmap_sem);
+
 	if (unlikely(kmmio_fault(regs, address)))
 		return;
...
@@ -527,7 +527,7 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-unsigned long __init
+unsigned long __meminit
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
 			     unsigned long page_size_mask)
...