Commit 101d5b71 authored by Ingo Molnar

Merge branch 'x86/signal' into core/signal

Conflicts:
	arch/x86/kernel/cpu/feature_names.c
	arch/x86/kernel/setup.c
	drivers/pci/intel-iommu.c
	include/asm-x86/cpufeature.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents cec5eb7b e6babb6b
......@@ -1425,6 +1425,12 @@ and is between 256 and 4096 characters. It is defined in the file
nolapic_timer [X86-32,APIC] Do not use the local APIC timer.
nox2apic [X86-64,APIC] Do not enable x2APIC mode.
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
supporting x2apic.
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
......
......@@ -29,6 +29,7 @@ config X86
select HAVE_FTRACE
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_EFFICIENT_UNALIGNED_ACCESS
......@@ -1643,6 +1644,14 @@ config DMAR_FLOPPY_WA
workaround will setup a 1:1 mapping for the first
16M to make floppy (an ISA device) work.
config INTR_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in CPUs which support x2APIC enhancements or
to support platforms with CPUs having > 8 bit APIC ID, say Y.
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
......
......@@ -418,3 +418,73 @@ config X86_MINIMUM_CPU_FAMILY
config X86_DEBUGCTLMSR
def_bool y
depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
menuconfig PROCESSOR_SELECT
default y
bool "Supported processor vendors" if EMBEDDED
help
This lets you choose what x86 vendor support code your kernel
will include.
config CPU_SUP_INTEL_32
default y
bool "Support Intel processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Intel processors
config CPU_SUP_INTEL_64
default y
bool "Support Intel processors" if PROCESSOR_SELECT
depends on 64BIT
help
This enables extended support for Intel processors
config CPU_SUP_CYRIX_32
default y
bool "Support Cyrix processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Cyrix processors
config CPU_SUP_AMD_32
default y
bool "Support AMD processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for AMD processors
config CPU_SUP_AMD_64
default y
bool "Support AMD processors" if PROCESSOR_SELECT
depends on 64BIT
help
This enables extended support for AMD processors
config CPU_SUP_CENTAUR_32
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Centaur processors
config CPU_SUP_CENTAUR_64
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
depends on 64BIT
help
This enables extended support for Centaur processors
config CPU_SUP_TRANSMETA_32
default y
bool "Support Transmeta processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for Transmeta processors
config CPU_SUP_UMC_32
default y
bool "Support UMC processors" if PROCESSOR_SELECT
depends on !64BIT
help
This enables extended support for UMC processors
......@@ -137,14 +137,15 @@ relocated:
*/
movl output_len(%ebx), %eax
pushl %eax
# push arguments for decompress_kernel:
pushl %ebp # output address
movl input_len(%ebx), %eax
pushl %eax # input_len
leal input_data(%ebx), %eax
pushl %eax # input_data
leal boot_heap(%ebx), %eax
pushl %eax # heap area as third argument
pushl %esi # real mode pointer as second arg
pushl %eax # heap area
pushl %esi # real mode pointer
call decompress_kernel
addl $20, %esp
popl %ecx
......
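For orientation, a sketch (not part of this diff) of the C prototype these pushes line up with: under the 32-bit cdecl convention the last value pushed becomes the first parameter, and output_len is pushed first only to be popped back into %ecx after the call rather than consumed as a sixth argument.

/* assumed prototype, reconstructed from the push comments above */
asmlinkage void decompress_kernel(void *rmode,           /* %esi, real mode pointer */
				  unsigned long heap,     /* boot_heap */
				  unsigned char *input_data,
				  unsigned long input_len,
				  unsigned char *output); /* %ebp */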
......@@ -16,7 +16,7 @@
*/
#undef CONFIG_PARAVIRT
#ifdef CONFIG_X86_32
#define _ASM_DESC_H_ 1
#define ASM_X86__DESC_H 1
#endif
#ifdef CONFIG_X86_64
......@@ -27,7 +27,7 @@
#include <linux/linkage.h>
#include <linux/screen_info.h>
#include <linux/elf.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
......@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s)
y--;
}
} else {
vidmem [(x + cols * y) * 2] = c;
vidmem[(x + cols * y) * 2] = c;
if (++x >= cols) {
x = 0;
if (++y >= lines) {
......@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n)
int i;
char *ss = s;
for (i = 0; i < n; i++) ss[i] = c;
for (i = 0; i < n; i++)
ss[i] = c;
return s;
}
......@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n)
const char *s = src;
char *d = dest;
for (i = 0; i < n; i++) d[i] = s[i];
for (i = 0; i < n; i++)
d[i] = s[i];
return dest;
}
......
......@@ -15,7 +15,7 @@
#include <stdio.h>
#include "../kernel/cpu/feature_names.c"
#include "../kernel/cpu/capflags.c"
#if NCAPFLAGS > 8
# error "Need to adjust the boot code handling of CPUID strings"
......
/*
* Written by: Garry Forsgren, Unisys Corporation
* Natalie Protasevich, Unisys Corporation
* This file contains the code to configure and interface
* This file contains the code to configure and interface
* with Unisys ES7000 series hardware system manager.
*
* Copyright (c) 2003 Unisys Corporation. All Rights Reserved.
......@@ -18,7 +18,7 @@
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Unisys Corporation, Township Line & Union Meeting
* Contact information: Unisys Corporation, Township Line & Union Meeting
* Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
*
* http://www.unisys.com
......@@ -41,7 +41,7 @@
#define MIP_VALID 0x0100000000000000ULL
#define MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff)
#define MIP_RD_LO(VALUE) (VALUE & 0xffffffff)
struct mip_reg_info {
unsigned long long mip_info;
......@@ -51,11 +51,11 @@ struct mip_reg_info {
};
struct part_info {
unsigned char type;
unsigned char type;
unsigned char length;
unsigned char part_id;
unsigned char apic_mode;
unsigned long snum;
unsigned long snum;
char ptype[16];
char sname[64];
char pname[64];
......@@ -68,11 +68,11 @@ struct psai {
};
struct es7000_mem_info {
unsigned char type;
unsigned char type;
unsigned char length;
unsigned char resv[6];
unsigned long long start;
unsigned long long size;
unsigned long long start;
unsigned long long size;
};
struct es7000_oem_table {
......@@ -106,7 +106,7 @@ struct mip_reg {
};
#define MIP_SW_APIC 0x1020b
#define MIP_FUNC(VALUE) (VALUE & 0xff)
#define MIP_FUNC(VALUE) (VALUE & 0xff)
extern int parse_unisys_oem (char *oemptr);
extern void setup_unisys(void);
......
......@@ -72,7 +72,7 @@ es7000_rename_gsi(int ioapic, int gsi)
base += nr_ioapic_registers[i];
}
if (!ioapic && (gsi < 16))
if (!ioapic && (gsi < 16))
gsi += base;
return gsi;
}
......
......@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
dump->regs.ax = regs->ax;
dump->regs.ds = current->thread.ds;
dump->regs.es = current->thread.es;
asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs;
asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs;
savesegment(fs, fs);
dump->regs.fs = fs;
savesegment(gs, gs);
dump->regs.gs = gs;
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = regs->cs;
......@@ -430,8 +432,9 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->start_stack =
(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
/* start thread */
asm volatile("movl %0,%%fs" :: "r" (0)); \
asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
loadsegment(fs, 0);
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);
load_gs_index(0);
(regs)->ip = ex.a_entry;
(regs)->sp = current->mm->start_stack;
......
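The savesegment()/loadsegment() helpers replacing the open-coded asm throughout this file are roughly the following (a paraphrase of asm-x86/system.h from this era, not part of this diff):

#define savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

loadsegment() is the write-side counterpart; it additionally carries an exception-table fixup so that loading a bogus selector falls back to the null selector instead of faulting inside the kernel.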
......@@ -179,9 +179,10 @@ struct sigframe
u32 pretcode;
int sig;
struct sigcontext_ia32 sc;
struct _fpstate_ia32 fpstate;
struct _fpstate_ia32 fpstate_unused; /* look at kernel/sigframe.h */
unsigned int extramask[_COMPAT_NSIG_WORDS-1];
char retcode[8];
/* fp state follows here */
};
struct rt_sigframe
......@@ -192,8 +193,8 @@ struct rt_sigframe
u32 puc;
compat_siginfo_t info;
struct ucontext_ia32 uc;
struct _fpstate_ia32 fpstate;
char retcode[8];
/* fp state follows here */
};
#define COPY(x) { \
......@@ -206,7 +207,7 @@ struct rt_sigframe
{ unsigned int cur; \
unsigned short pre; \
err |= __get_user(pre, &sc->seg); \
asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \
savesegment(seg, cur); \
pre |= mask; \
if (pre != cur) loadsegment(seg, pre); }
......@@ -215,7 +216,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
unsigned int *peax)
{
unsigned int tmpflags, gs, oldgs, err = 0;
struct _fpstate_ia32 __user *buf;
void __user *buf;
u32 tmp;
/* Always make any pending restarted system calls return -EINTR */
......@@ -235,7 +236,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
*/
err |= __get_user(gs, &sc->gs);
gs |= 3;
asm("movl %%gs,%0" : "=r" (oldgs));
savesegment(gs, oldgs);
if (gs != oldgs)
load_gs_index(gs);
......@@ -259,26 +260,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
err |= __get_user(tmp, &sc->fpstate);
buf = compat_ptr(tmp);
if (buf) {
if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= restore_i387_ia32(buf);
} else {
struct task_struct *me = current;
if (used_math()) {
clear_fpu(me);
clear_used_math();
}
}
err |= restore_i387_xstate_ia32(buf);
err |= __get_user(tmp, &sc->ax);
*peax = tmp;
return err;
badframe:
return 1;
}
asmlinkage long sys32_sigreturn(struct pt_regs *regs)
......@@ -350,19 +337,18 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
*/
static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
struct _fpstate_ia32 __user *fpstate,
void __user *fpstate,
struct pt_regs *regs, unsigned int mask)
{
int tmp, err = 0;
tmp = 0;
__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
__asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp));
savesegment(ds, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
__asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp));
savesegment(es, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
err |= __put_user((u32)regs->di, &sc->di);
......@@ -381,7 +367,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
err |= __put_user((u32)regs->flags, &sc->flags);
err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
tmp = save_i387_ia32(fpstate);
tmp = save_i387_xstate_ia32(fpstate);
if (tmp < 0)
err = -EFAULT;
else {
......@@ -402,7 +388,8 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
* Determine which stack to use..
*/
static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
size_t frame_size,
void **fpstate)
{
unsigned long sp;
......@@ -421,6 +408,11 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
ka->sa.sa_restorer)
sp = (unsigned long) ka->sa.sa_restorer;
if (used_math()) {
sp = sp - sig_xstate_ia32_size;
*fpstate = (struct _fpstate_ia32 *) sp;
}
sp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0. */
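The resulting 32-bit signal stack layout, as a sketch of what the code above sets up (the stack grows downward):

/*	original sp
 *	[ sig_xstate_ia32_size bytes of FP/extended state ]	<- *fpstate
 *	[ struct sigframe / rt_sigframe ]			<- returned frame
 */

This is also why the fpstate members were renamed fpstate_unused earlier in this diff: the live FP state now follows the frame at a runtime-sized offset instead of being embedded in it.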
......@@ -434,6 +426,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
struct sigframe __user *frame;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
/* copy_to_user optimizes that into a single 8 byte store */
static const struct {
......@@ -448,25 +441,21 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
0,
};
frame = get_sigframe(ka, regs, sizeof(*frame));
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
return -EFAULT;
err |= __put_user(sig, &frame->sig);
if (err)
goto give_sigsegv;
if (__put_user(sig, &frame->sig))
return -EFAULT;
err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs,
set->sig[0]);
if (err)
goto give_sigsegv;
if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
return -EFAULT;
if (_COMPAT_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
if (err)
goto give_sigsegv;
if (__copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask)))
return -EFAULT;
}
if (ka->sa.sa_flags & SA_RESTORER) {
......@@ -487,7 +476,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
*/
err |= __copy_to_user(frame->retcode, &code, 8);
if (err)
goto give_sigsegv;
return -EFAULT;
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
......@@ -498,8 +487,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->dx = 0;
regs->cx = 0;
asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
......@@ -510,10 +499,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
#endif
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
......@@ -522,6 +507,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
/* __copy_to_user optimizes that into a single 8 byte store */
static const struct {
......@@ -537,30 +523,33 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
0,
};
frame = get_sigframe(ka, regs, sizeof(*frame));
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
return -EFAULT;
err |= __put_user(sig, &frame->sig);
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
if (err)
goto give_sigsegv;
return -EFAULT;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
else
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
return -EFAULT;
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
......@@ -575,7 +564,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
*/
err |= __copy_to_user(frame->retcode, &code, 8);
if (err)
goto give_sigsegv;
return -EFAULT;
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
......@@ -591,8 +580,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->dx = (unsigned long) &frame->info;
regs->cx = (unsigned long) &frame->uc;
asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
......@@ -603,8 +592,4 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
#endif
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
......@@ -38,7 +38,7 @@ obj-y += tsc.o io_delay.o rtc.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
obj-y += i387.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
obj-y += ds.o
obj-$(CONFIG_X86_32) += tls.o
......@@ -104,6 +104,8 @@ obj-$(CONFIG_OLPC) += olpc.o
ifeq ($(CONFIG_X86_64),y)
obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
obj-y += bios_uv.o
obj-y += genx2apic_cluster.o
obj-y += genx2apic_phys.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
obj-$(CONFIG_AUDIT) += audit_64.o
......
......@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_X86_64
#include <asm/proto.h>
#include <asm/genapic.h>
#else /* X86 */
......@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
......@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;
static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
{
if (!strcmp(mcfg->header.oem_id, "SGI"))
......@@ -775,7 +774,7 @@ static void __init acpi_register_lapic_address(unsigned long address)
set_fixmap_nocache(FIX_APIC_BASE, address);
if (boot_cpu_physical_apicid == -1U) {
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
boot_cpu_physical_apicid = read_apic_id();
#ifdef CONFIG_X86_32
apic_version[boot_cpu_physical_apicid] =
GET_APIC_VERSION(apic_read(APIC_LVR));
......@@ -1351,7 +1350,9 @@ static void __init acpi_process_madt(void)
acpi_ioapic = 1;
smp_found_config = 1;
#ifdef CONFIG_X86_32
setup_apic_routing();
#endif
}
}
if (error == -EINVAL) {
......
......@@ -145,13 +145,18 @@ static int modern_apic(void)
return lapic_get_version() >= 0x14;
}
void apic_wait_icr_idle(void)
/*
* Paravirt kernels might also be using the ops below, so we still
* use the generic apic_read()/apic_write(), which may point to
* different ops in the PARAVIRT case.
*/
void xapic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
u32 safe_apic_wait_icr_idle(void)
u32 safe_xapic_wait_icr_idle(void)
{
u32 send_status;
int timeout;
......@@ -167,6 +172,34 @@ u32 safe_apic_wait_icr_idle(void)
return send_status;
}
void xapic_icr_write(u32 low, u32 id)
{
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
apic_write(APIC_ICR, low);
}
u64 xapic_icr_read(void)
{
u32 icr1, icr2;
icr2 = apic_read(APIC_ICR2);
icr1 = apic_read(APIC_ICR);
return icr1 | ((u64)icr2 << 32);
}
static struct apic_ops xapic_ops = {
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = xapic_icr_read,
.icr_write = xapic_icr_write,
.wait_icr_idle = xapic_wait_icr_idle,
.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};
struct apic_ops __read_mostly *apic_ops = &xapic_ops;
EXPORT_SYMBOL_GPL(apic_ops);
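A sketch of how this ops table is consumed (assumed from the usual pattern for such indirection, not shown in this diff): the generic accessors dispatch through the currently installed apic_ops, so switching the whole kernel over to x2apic is a single pointer update.

static inline u32 apic_read(u32 reg)
{
	return apic_ops->read(reg);
}

static inline void apic_write(u32 reg, u32 val)
{
	apic_ops->write(reg, val);
}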
/**
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
*/
......@@ -1205,7 +1238,7 @@ void __init init_apic_mappings(void)
* default configuration (or the MP table is broken).
*/
if (boot_cpu_physical_apicid == -1U)
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
boot_cpu_physical_apicid = read_apic_id();
}
......@@ -1242,7 +1275,7 @@ int __init APIC_init_uniprocessor(void)
* might be zero if read from MP tables. Get it from LAPIC.
*/
#ifdef CONFIG_CRASH_DUMP
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
boot_cpu_physical_apicid = read_apic_id();
#endif
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
......
......@@ -27,6 +27,7 @@
#include <linux/clockchips.h>
#include <linux/acpi_pmtmr.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/atomic.h>
#include <asm/smp.h>
......@@ -39,6 +40,7 @@
#include <asm/proto.h>
#include <asm/timex.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <mach_ipi.h>
#include <mach_apic.h>
......@@ -46,6 +48,11 @@
static int disable_apic_timer __cpuinitdata;
static int apic_calibrate_pmtmr __initdata;
int disable_apic;
int disable_x2apic;
int x2apic;
/* x2apic enabled before OS handover */
int x2apic_preenabled;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
......@@ -118,13 +125,13 @@ static int modern_apic(void)
return lapic_get_version() >= 0x14;
}
void apic_wait_icr_idle(void)
void xapic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
cpu_relax();
}
u32 safe_apic_wait_icr_idle(void)
u32 safe_xapic_wait_icr_idle(void)
{
u32 send_status;
int timeout;
......@@ -140,6 +147,69 @@ u32 safe_apic_wait_icr_idle(void)
return send_status;
}
void xapic_icr_write(u32 low, u32 id)
{
apic_write(APIC_ICR2, id << 24);
apic_write(APIC_ICR, low);
}
u64 xapic_icr_read(void)
{
u32 icr1, icr2;
icr2 = apic_read(APIC_ICR2);
icr1 = apic_read(APIC_ICR);
return (icr1 | ((u64)icr2 << 32));
}
static struct apic_ops xapic_ops = {
.read = native_apic_mem_read,
.write = native_apic_mem_write,
.icr_read = xapic_icr_read,
.icr_write = xapic_icr_write,
.wait_icr_idle = xapic_wait_icr_idle,
.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};
struct apic_ops __read_mostly *apic_ops = &xapic_ops;
EXPORT_SYMBOL_GPL(apic_ops);
static void x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return;
}
static u32 safe_x2apic_wait_icr_idle(void)
{
/* no need to wait for icr idle in x2apic */
return 0;
}
void x2apic_icr_write(u32 low, u32 id)
{
wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
u64 x2apic_icr_read(void)
{
unsigned long val;
rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
static struct apic_ops x2apic_ops = {
.read = native_apic_msr_read,
.write = native_apic_msr_write,
.icr_read = x2apic_icr_read,
.icr_write = x2apic_icr_write,
.wait_icr_idle = x2apic_wait_icr_idle,
.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
};
/**
* enable_NMI_through_LVT0 - enable NMI through local vector table 0
*/
......@@ -629,10 +699,10 @@ int __init verify_local_APIC(void)
/*
* The ID register is read/write in a real APIC.
*/
reg0 = read_apic_id();
reg0 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
reg1 = read_apic_id();
reg1 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
apic_write(APIC_ID, reg0);
if (reg1 != (reg0 ^ APIC_ID_MASK))
......@@ -833,6 +903,125 @@ void __cpuinit end_local_APIC_setup(void)
apic_pm_activate();
}
void check_x2apic(void)
{
int msr, msr2;
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (msr & X2APIC_ENABLE) {
printk("x2apic enabled by BIOS, switching to x2apic ops\n");
x2apic_preenabled = x2apic = 1;
apic_ops = &x2apic_ops;
}
}
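For reference, the IA32_APIC_BASE bits being tested here (positions assumed from the Intel SDM, not from this diff):

/* MSR_IA32_APICBASE (0x1B), low word:
 *	bit  8	BSP flag
 *	bit 10	x2APIC enable	(X2APIC_ENABLE)
 *	bit 11	xAPIC global enable
 */

Since the global enable bit is already set on a running system, enable_x2apic() below only ever needs to OR in the x2apic enable bit.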
void enable_x2apic(void)
{
int msr, msr2;
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
printk("Enabling x2apic\n");
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
}
}
void enable_IR_x2apic(void)
{
#ifdef CONFIG_INTR_REMAP
int ret;
unsigned long flags;
if (!cpu_has_x2apic)
return;
if (!x2apic_preenabled && disable_x2apic) {
printk(KERN_INFO
"Skipped enabling x2apic and Interrupt-remapping "
"because of nox2apic\n");
return;
}
if (x2apic_preenabled && disable_x2apic)
panic("Bios already enabled x2apic, can't enforce nox2apic");
if (!x2apic_preenabled && skip_ioapic_setup) {
printk(KERN_INFO
"Skipped enabling x2apic and Interrupt-remapping "
"because of skipping io-apic setup\n");
return;
}
ret = dmar_table_init();
if (ret) {
printk(KERN_INFO
"dmar_table_init() failed with %d:\n", ret);
if (x2apic_preenabled)
panic("x2apic enabled by bios. But IR enabling failed");
else
printk(KERN_INFO
"Not enabling x2apic,Intr-remapping\n");
return;
}
local_irq_save(flags);
mask_8259A();
save_mask_IO_APIC_setup();
ret = enable_intr_remapping(1);
if (ret && x2apic_preenabled) {
local_irq_restore(flags);
panic("x2apic enabled by bios. But IR enabling failed");
}
if (ret)
goto end;
if (!x2apic) {
x2apic = 1;
apic_ops = &x2apic_ops;
enable_x2apic();
}
end:
if (ret)
/*
* IR enabling failed
*/
restore_IO_APIC_setup();
else
reinit_intr_remapped_IO_APIC(x2apic_preenabled);
unmask_8259A();
local_irq_restore(flags);
if (!ret) {
if (!x2apic_preenabled)
printk(KERN_INFO
"Enabled x2apic and interrupt-remapping\n");
else
printk(KERN_INFO
"Enabled Interrupt-remapping\n");
} else
printk(KERN_ERR
"Failed to enable Interrupt-remapping and x2apic\n");
#else
if (!cpu_has_x2apic)
return;
if (x2apic_preenabled)
panic("x2apic enabled prior OS handover,"
" enable CONFIG_INTR_REMAP");
printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
" and x2apic\n");
#endif
return;
}
/*
* Detect and enable local APICs on non-SMP boards.
* Original code written by Keir Fraser.
......@@ -872,7 +1061,7 @@ void __init early_init_lapic_mapping(void)
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
boot_cpu_physical_apicid = read_apic_id();
}
/**
......@@ -880,6 +1069,11 @@ void __init early_init_lapic_mapping(void)
*/
void __init init_apic_mappings(void)
{
if (x2apic) {
boot_cpu_physical_apicid = read_apic_id();
return;
}
/*
* If no local APIC can be found then set up a fake all
* zeroes page to simulate the local APIC and another
......@@ -899,7 +1093,7 @@ void __init init_apic_mappings(void)
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
boot_cpu_physical_apicid = read_apic_id();
}
/*
......@@ -918,6 +1112,9 @@ int __init APIC_init_uniprocessor(void)
return -1;
}
enable_IR_x2apic();
setup_apic_routing();
verify_local_APIC();
connect_bsp_APIC();
......@@ -1093,6 +1290,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
cpu_set(cpu, cpu_present_map);
}
int hard_smp_processor_id(void)
{
return read_apic_id();
}
/*
* Power management
*/
......@@ -1129,7 +1331,7 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
maxlvt = lapic_get_maxlvt();
apic_pm_state.apic_id = read_apic_id();
apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
......@@ -1164,10 +1366,14 @@ static int lapic_resume(struct sys_device *dev)
maxlvt = lapic_get_maxlvt();
local_irq_save(flags);
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
if (!x2apic) {
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
} else
enable_x2apic();
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
apic_write(APIC_DFR, apic_pm_state.apic_dfr);
......@@ -1307,6 +1513,15 @@ __cpuinit int apic_is_clustered_box(void)
return (clusters > 2);
}
static __init int setup_nox2apic(char *str)
{
disable_x2apic = 1;
clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
return 0;
}
early_param("nox2apic", setup_nox2apic);
/*
* APIC command line parameters
*/
......
......@@ -228,7 +228,6 @@
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......
......@@ -22,7 +22,7 @@
#define __NO_STUBS 1
#undef __SYSCALL
#undef _ASM_X86_64_UNISTD_H_
#undef ASM_X86__UNISTD_64_H
#define __SYSCALL(nr, sym) [nr] = 1,
static char syscalls[] = {
#include <asm/unistd.h>
......
......@@ -25,11 +25,11 @@ x86_bios_strerror(long status)
{
const char *str;
switch (status) {
case 0: str = "Call completed without error"; break;
case -1: str = "Not implemented"; break;
case -2: str = "Invalid argument"; break;
case -3: str = "Call completed with error"; break;
default: str = "Unknown BIOS status code"; break;
case 0: str = "Call completed without error"; break;
case -1: str = "Not implemented"; break;
case -2: str = "Invalid argument"; break;
case -3: str = "Call completed with error"; break;
default: str = "Unknown BIOS status code"; break;
}
return str;
}
......
......@@ -3,22 +3,32 @@
#
obj-y := intel_cacheinfo.o addon_cpuid_features.o
obj-y += proc.o feature_names.o
obj-y += proc.o capflags.o powerflags.o
obj-$(CONFIG_X86_32) += common.o bugs.o
obj-$(CONFIG_X86_32) += common.o bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += common_64.o bugs_64.o
obj-$(CONFIG_X86_32) += amd.o
obj-$(CONFIG_X86_64) += amd_64.o
obj-$(CONFIG_X86_32) += cyrix.o
obj-$(CONFIG_X86_32) += centaur.o
obj-$(CONFIG_X86_64) += centaur_64.o
obj-$(CONFIG_X86_32) += transmeta.o
obj-$(CONFIG_X86_32) += intel.o
obj-$(CONFIG_X86_64) += intel_64.o
obj-$(CONFIG_X86_32) += umc.o
obj-$(CONFIG_CPU_SUP_AMD_32) += amd.o
obj-$(CONFIG_CPU_SUP_AMD_64) += amd_64.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o
obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_INTEL_32) += intel.o
obj-$(CONFIG_CPU_SUP_INTEL_64) += intel_64.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
quiet_cmd_mkcapflags = MKCAP $@
cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
cpufeature = $(src)/../../../../include/asm-x86/cpufeature.h
targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
$(call if_changed,mkcapflags)
/*
* cmpxchg*() fallbacks for CPU not supporting these instructions
*/
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
u8 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u8 *)ptr;
if (prev == old)
*(u8 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);
unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
u16 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u16 *)ptr;
if (prev == old)
*(u16 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);
unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
u32 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u32 *)ptr;
if (prev == old)
*(u32 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif
#ifndef CONFIG_X86_CMPXCHG64
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
u64 prev;
unsigned long flags;
/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif
......@@ -355,6 +355,35 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
clear_cpu_cap(c, X86_FEATURE_NOPL);
}
/*
* The NOPL instruction is supposed to exist on all CPUs with
* family >= 6; unfortunately, that's not true in practice because
* of early VIA chips and (more importantly) broken virtualizers that
* are not easy to detect. Hence, probe for it based on first
* principles.
*/
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
const u32 nopl_signature = 0x888c53b1; /* Random number */
u32 has_nopl = nopl_signature;
clear_cpu_cap(c, X86_FEATURE_NOPL);
if (c->x86 >= 6) {
asm volatile("\n"
"1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
"2:\n"
" .section .fixup,\"ax\"\n"
"3: xor %0,%0\n"
" jmp 2b\n"
" .previous\n"
_ASM_EXTABLE(1b,3b)
: "+a" (has_nopl));
if (has_nopl == nopl_signature)
set_cpu_cap(c, X86_FEATURE_NOPL);
}
}
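A note on the fixup mechanics above: _ASM_EXTABLE(1b,3b) records an exception-table entry, so if the nopl opcode at label 1 raises #UD on a CPU (or virtualizer) that lacks it, the fault handler resumes at label 3, which zeroes has_nopl and makes the signature comparison fail; if the instruction executes cleanly, the random signature survives in %eax and the capability bit is set.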
static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
......@@ -723,9 +752,20 @@ void __cpuinit cpu_init(void)
/*
* Force FPU initialization:
*/
current_thread_info()->status = 0;
if (cpu_has_xsave)
current_thread_info()->status = TS_XSAVE;
else
current_thread_info()->status = 0;
clear_used_math();
mxcsr_feature_mask_init();
/*
* Boot processor to setup the FP and extended state context info.
*/
if (!smp_processor_id())
init_thread_xstate();
xsave_init();
}
#ifdef CONFIG_HOTPLUG_CPU
......
......@@ -636,6 +636,8 @@ void __cpuinit cpu_init(void)
barrier();
check_efer();
if (cpu != 0 && x2apic)
enable_x2apic();
/*
* set up and load the per-CPU TSS
......
......@@ -121,7 +121,7 @@ static void __cpuinit set_cx86_reorder(void)
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Load/Store Serialize to mem access disable (=reorder it) */
setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3);
......@@ -132,11 +132,11 @@ static void __cpuinit set_cx86_memwb(void)
printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
}
/*
......@@ -150,14 +150,14 @@ static void __cpuinit geode_configure(void)
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* FPU fast, DTE cache, Mem bypass */
setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
set_cx86_memwb();
......@@ -291,7 +291,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */
setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
/*
* GXm : 0x30 ... 0x5f GXm datasheet 51
......@@ -314,7 +314,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
if (dir1 > 7) {
dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */
setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
} else {
c->coma_bug = 1; /* 6x86MX, it has the bug. */
}
......@@ -429,7 +429,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80); /* enable cpuid */
setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
local_irq_restore(flags);
}
......
/*
* Strings for the various x86 capability flags.
*
* This file must not contain any executable code.
*/
#include <asm/cpufeature.h>
/*
* These flag bits must match the definitions in <asm/cpufeature.h>.
* NULL means this bit is undefined or reserved; either way it doesn't
* have meaning as far as Linux is concerned. Note that it's important
* to realize there is a difference between this table and CPUID -- if
* applications want to get the raw CPUID data, they should access
* /dev/cpu/<cpu_nr>/cpuid instead.
*/
const char * const x86_cap_flags[NCAPINTS*32] = {
/* Intel-defined */
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
/* AMD-defined */
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
"3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Other (Linux-defined) */
"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
NULL, NULL, NULL, NULL,
"constant_tsc", "up", NULL, "arch_perfmon",
"pebs", "bts", NULL, NULL,
"rep_good", NULL, NULL, NULL,
"nopl", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Intel-defined (#2) */
"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* VIA/Cyrix/Centaur-defined */
NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* AMD-defined (#2) */
"lahf_lm", "cmp_legacy", "svm", "extapic",
"cr8_legacy", "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "sse5",
"skinit", "wdt", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Auxiliary (Linux-defined) */
"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
const char *const x86_power_flags[32] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */
"ttp", /* thermal trip */
"tm",
"stc",
"100mhzsteps",
"hwpstate",
"", /* tsc invariant mapped to constant_tsc */
/* nothing */
};
......@@ -23,13 +23,6 @@
#include <mach_apic.h>
#endif
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
* Alignment at which movsl is preferred for bulk memory copies.
*/
struct movsl_mask movsl_mask __read_mostly;
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
......@@ -314,69 +307,5 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
#ifndef CONFIG_X86_CMPXCHG
unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
{
u8 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u8 *)ptr;
if (prev == old)
*(u8 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u8);
unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
{
u16 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u16 *)ptr;
if (prev == old)
*(u16 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u16);
unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
{
u32 prev;
unsigned long flags;
/* Poor man's cmpxchg for 386. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u32 *)ptr;
if (prev == old)
*(u32 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_386_u32);
#endif
#ifndef CONFIG_X86_CMPXCHG64
unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new)
{
u64 prev;
unsigned long flags;
/* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */
local_irq_save(flags);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
local_irq_restore(flags);
return prev;
}
EXPORT_SYMBOL(cmpxchg_486_u64);
#endif
/* arch_initcall(intel_cpu_init); */
/*
* Routines to identify caches on Intel CPU.
* Routines to identify caches on Intel CPU.
*
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
* Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
*/
......@@ -13,6 +13,7 @@
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/smp.h>
......@@ -130,9 +131,18 @@ struct _cpuid4_info {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
unsigned long can_disable;
cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */
};
#ifdef CONFIG_PCI
static struct pci_device_id k8_nb_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
{}
};
#endif
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
......@@ -182,9 +192,10 @@ static unsigned short assocs[] __cpuinitdata = {
static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
{
unsigned dummy;
unsigned line_size, lines_per_tag, assoc, size_in_kb;
......@@ -251,27 +262,40 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
(ebx->split.ways_of_associativity + 1) - 1;
}
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
{
if (index < 3)
return;
this_leaf->can_disable = 1;
}
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned edx;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx);
else
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
if (boot_cpu_data.x86 >= 0x10)
amd_check_l3_disable(index, this_leaf);
} else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
if (eax.split.type == CACHE_TYPE_NULL)
return -EIO; /* better error ? */
this_leaf->eax = eax;
this_leaf->ebx = ebx;
this_leaf->ecx = ecx;
this_leaf->size = (ecx.split.number_of_sets + 1) *
(ebx.split.coherency_line_size + 1) *
(ebx.split.physical_line_partition + 1) *
(ebx.split.ways_of_associativity + 1);
this_leaf->size = (ecx.split.number_of_sets + 1) *
(ebx.split.coherency_line_size + 1) *
(ebx.split.physical_line_partition + 1) *
(ebx.split.ways_of_associativity + 1);
return 0;
}
......@@ -453,7 +477,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
......@@ -490,7 +514,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
this_leaf = CPUID4_INFO_IDX(cpu, index);
for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
cpu_clear(cpu, sibling_leaf->shared_cpu_map);
}
}
......@@ -572,7 +596,7 @@ struct _index_kobject {
/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
......@@ -637,6 +661,99 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
}
}
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
struct pci_dev *dev = NULL;
int i;
for (i = 0; i <= node; i++) {
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (!dev)
break;
} while (!pci_match_id(&k8_nb_id[0], dev));
if (!dev)
break;
}
return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
return NULL;
}
#endif
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
struct pci_dev *dev = NULL;
ssize_t ret = 0;
int i;
if (!this_leaf->can_disable)
return sprintf(buf, "Feature not enabled\n");
dev = get_k8_northbridge(node);
if (!dev) {
printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
return -EINVAL;
}
for (i = 0; i < 2; i++) {
unsigned int reg;
pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
ret += sprintf(buf, "%sEntry: %d\n", buf, i);
ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
buf,
reg & 0x80000000 ? "Disabled" : "Allowed",
reg & 0x40000000 ? "Disabled" : "Allowed");
ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
buf, (reg & 0x30000) >> 16, reg & 0xfff);
}
return ret;
}
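The field decoding above implies the following register layout (a sketch assuming these are the AMD family 10h L3 cache index disable registers at northbridge config offsets 0x1BC and 0x1C0; the layout is not spelled out in this diff):

/*	bit 31		reads disabled
 *	bit 30		new entries disabled
 *	bits 17:16	subcache mask
 *	bits 11:0	cache index
 */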
static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
size_t count)
{
int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
struct pci_dev *dev = NULL;
unsigned int ret, index, val;
if (!this_leaf->can_disable)
return 0;
if (strlen(buf) > 15)
return -EINVAL;
ret = sscanf(buf, "%x %x", &index, &val);
if (ret != 2)
return -EINVAL;
if (index > 1)
return -EINVAL;
val |= 0xc0000000;
dev = get_k8_northbridge(node);
if (!dev) {
printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
return -EINVAL;
}
pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
wbinvd();
pci_write_config_dword(dev, 0x1BC + index * 4, val);
return 1;
}
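Usage sketch, inferred from the sscanf format above: userspace writes two hex fields, an entry index (0 or 1) and a disable value, to the cache_disable attribute. The handler forces bits 30 and 31 on, writes the value with bit 30 cleared, runs wbinvd() to flush any lines still cached at that index, and then commits the full value.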
struct _cache_attr {
struct attribute attr;
ssize_t (*show)(struct _cpuid4_info *, char *);
......@@ -657,6 +774,8 @@ define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
static struct attribute * default_attrs[] = {
&type.attr,
&level.attr,
......@@ -667,12 +786,10 @@ static struct attribute * default_attrs[] = {
&size.attr,
&shared_cpu_map.attr,
&shared_cpu_list.attr,
&cache_disable.attr,
NULL
};
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
struct _cache_attr *fattr = to_attr(attr);
......@@ -682,14 +799,22 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
ret = fattr->show ?
fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
buf) :
0;
0;
return ret;
}
static ssize_t store(struct kobject * kobj, struct attribute * attr,
const char * buf, size_t count)
{
return 0;
struct _cache_attr *fattr = to_attr(attr);
struct _index_kobject *this_leaf = to_object(kobj);
ssize_t ret;
ret = fattr->store ?
fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
buf, count) :
0;
return ret;
}
static struct sysfs_ops sysfs_ops = {
......
#!/usr/bin/perl
#
# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
#
($in, $out) = @ARGV;
open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
print OUT "#include <asm/cpufeature.h>\n\n";
print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
while (defined($line = <IN>)) {
if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
$macro = $1;
$feature = $2;
$tail = $3;
if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
$feature = $1;
}
if ($feature ne '') {
printf OUT "\t%-32s = \"%s\",\n",
"[$macro]", "\L$feature";
}
}
}
print OUT "};\n";
close(IN);
close(OUT);
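For illustration, a hypothetical cpufeature.h input line such as

#define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */

would produce this entry in the generated capflags.c (a quoted string inside the comment, e.g. /* "sse4_1" ... */, overrides the lowercased macro suffix):

	[X86_FEATURE_FPU]                = "fpu",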
......@@ -729,7 +729,7 @@ struct var_mtrr_range_state {
mtrr_type type;
};
struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;
static int __init
......
/*
* Strings for the various x86 power flags
*
* This file must not contain any executable code.
*/
#include <asm/cpufeature.h>
const char *const x86_power_flags[32] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */
"ttp", /* thermal trip */
"tm",
"stc",
"100mhzsteps",
"hwpstate",
"", /* tsc invariant mapped to constant_tsc */
/* nothing */
};
......@@ -36,7 +36,6 @@
#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
......
......@@ -7,9 +7,8 @@
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
......@@ -25,7 +24,7 @@
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
......
......@@ -16,87 +16,63 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
DEFINE_PER_CPU(int, x2apic_extra_bits);
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2xpic_uv_x;
extern struct genapic apic_x2apic_phys;
extern struct genapic apic_x2apic_cluster;
struct genapic __read_mostly *genapic = &apic_flat;
static enum uv_system_type uv_system_type;
static struct genapic *apic_probe[] __initdata = {
&apic_x2apic_uv_x,
&apic_x2apic_phys,
&apic_x2apic_cluster,
&apic_physflat,
NULL,
};
/*
* Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
*/
void __init setup_apic_routing(void)
{
if (uv_system_type == UV_NON_UNIQUE_APIC)
genapic = &apic_x2apic_uv_x;
else
#ifdef CONFIG_ACPI
/*
* Quirk: some x86_64 machines can only use physical APIC mode
* regardless of how many processors are present (x86_64 ES7000
* is an example).
*/
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
genapic = &apic_physflat;
else
#endif
if (max_physical_apicid < 8)
genapic = &apic_flat;
else
genapic = &apic_physflat;
if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) {
if (!intr_remapping_enabled)
genapic = &apic_flat;
}
printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
if (genapic == &apic_flat) {
if (max_physical_apicid >= 8)
genapic = &apic_physflat;
printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
}
}
/* Same for both flat and physical. */
void send_IPI_self(int vector)
void apic_send_IPI_self(int vector)
{
__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}
int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strcmp(oem_id, "SGI")) {
if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
else if (!strcmp(oem_table_id, "UVX"))
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH"))
uv_system_type = UV_NON_UNIQUE_APIC;
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
genapic = apic_probe[i];
printk(KERN_INFO "Setting APIC routing to %s.\n",
genapic->name);
return 1;
}
}
return 0;
}
unsigned int read_apic_id(void)
{
unsigned int id;
WARN_ON(preemptible() && num_online_cpus() > 1);
id = apic_read(APIC_ID);
if (uv_system_type >= UV_X2APIC)
id |= __get_cpu_var(x2apic_extra_bits);
return id;
}
enum uv_system_type get_uv_system_type(void)
{
return uv_system_type;
}
int is_uv_system(void)
{
return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);
......@@ -15,9 +15,20 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <mach_apicdef.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
static int __init flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
return 1;
}
static cpumask_t flat_target_cpus(void)
{
......@@ -95,9 +106,33 @@ static void flat_send_IPI_all(int vector)
__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
id = (((x)>>24) & 0xFFu);
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
x = ((id & 0xFFu)<<24);
return x;
}
static unsigned int read_xapic_id(void)
{
unsigned int id;
id = get_apic_id(apic_read(APIC_ID));
return id;
}
static int flat_apic_id_registered(void)
{
return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
......@@ -112,6 +147,7 @@ static unsigned int phys_pkg_id(int index_msb)
struct genapic apic_flat = {
.name = "flat",
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
.int_delivery_mode = dest_LowestPrio,
.int_dest_mode = (APIC_DEST_LOGICAL != 0),
.target_cpus = flat_target_cpus,
......@@ -121,8 +157,12 @@ struct genapic apic_flat = {
.send_IPI_all = flat_send_IPI_all,
.send_IPI_allbutself = flat_send_IPI_allbutself,
.send_IPI_mask = flat_send_IPI_mask,
.send_IPI_self = apic_send_IPI_self,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFu<<24),
};
/*
......@@ -130,6 +170,21 @@ struct genapic apic_flat = {
* We cannot use logical delivery in this case because the mask
* overflows, so use physical mode.
*/
static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
/*
* Quirk: some x86_64 machines can only use physical APIC mode
* regardless of how many processors are present (x86_64 ES7000
* is an example).
*/
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
return 1;
#endif
return 0;
}
static cpumask_t physflat_target_cpus(void)
{
......@@ -176,6 +231,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
struct genapic apic_physflat = {
.name = "physical flat",
.acpi_madt_oem_check = physflat_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = physflat_target_cpus,
......@@ -185,6 +241,10 @@ struct genapic apic_physflat = {
.send_IPI_all = physflat_send_IPI_all,
.send_IPI_allbutself = physflat_send_IPI_allbutself,
.send_IPI_mask = physflat_send_IPI_mask,
.send_IPI_self = apic_send_IPI_self,
.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFu<<24),
};
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (cpu_has_x2apic)
return 1;
return 0;
}
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t x2apic_target_cpus(void)
{
return cpumask_of_cpu(0);
}
/*
* for now each logical cpu is in its own vector allocation domain.
*/
static cpumask_t x2apic_vector_allocation_domain(int cpu)
{
cpumask_t domain = CPU_MASK_NONE;
cpu_set(cpu, domain);
return domain;
}
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
unsigned int dest)
{
unsigned long cfg;
cfg = __prepare_ICR(0, vector, dest);
/*
* send the IPI.
*/
x2apic_icr_write(cfg, apicid);
}
/*
* for now, we send the IPIs one by one in the cpumask.
* TBD: Based on the cpu mask, we can send the IPIs to the cluster group
* at once. We have 16 cpus in a cluster. This will minimize IPI register
* writes.
*/
static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
{
unsigned long flags;
unsigned long query_cpu;
local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, APIC_DEST_LOGICAL);
}
local_irq_restore(flags);
}
static void x2apic_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
x2apic_send_IPI_mask(mask, vector);
}
static void x2apic_send_IPI_all(int vector)
{
x2apic_send_IPI_mask(cpu_online_map, vector);
}
static int x2apic_apic_id_registered(void)
{
return 1;
}
static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = first_cpu(cpumask);
if ((unsigned)cpu < NR_CPUS)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
else
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
id = x;
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
x = id;
return x;
}
static unsigned int x2apic_read_id(void)
{
return apic_read(APIC_ID);
}
static unsigned int phys_pkg_id(int index_msb)
{
return x2apic_read_id() >> index_msb;
}
static void x2apic_send_IPI_self(int vector)
{
apic_write(APIC_SELF_IPI, vector);
}
static void init_x2apic_ldr(void)
{
int cpu = smp_processor_id();
per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
return;
}
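init_x2apic_ldr() caches the hardware-assigned logical APIC ID at boot. As a reference note (from the x2apic architecture, not from this patch), the cluster-mode LDR packs the cluster number and a one-hot member bit; the two accessors below are invented names for illustration:
/* Reference note on the cluster x2apic LDR layout:
 *   bits 31..16  cluster ID
 *   bits 15..0   one-hot position within the cluster (16 CPUs/cluster)
 */
static inline unsigned int x2apic_cluster_of(u32 ldr) { return ldr >> 16; }
static inline unsigned int x2apic_member_of(u32 ldr)  { return ldr & 0xffff; }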
struct genapic apic_x2apic_cluster = {
.name = "cluster x2apic",
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
.int_delivery_mode = dest_LowestPrio,
.int_dest_mode = (APIC_DEST_LOGICAL != 0),
.target_cpus = x2apic_target_cpus,
.vector_allocation_domain = x2apic_vector_allocation_domain,
.apic_id_registered = x2apic_apic_id_registered,
.init_apic_ldr = init_x2apic_ldr,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_self = x2apic_send_IPI_self,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFFFFFFFu),
};
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
static int x2apic_phys;
static int set_x2apic_phys_mode(char *arg)
{
x2apic_phys = 1;
return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);
static int __init x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (cpu_has_x2apic && x2apic_phys)
return 1;
return 0;
}
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t x2apic_target_cpus(void)
{
return cpumask_of_cpu(0);
}
static cpumask_t x2apic_vector_allocation_domain(int cpu)
{
cpumask_t domain = CPU_MASK_NONE;
cpu_set(cpu, domain);
return domain;
}
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
unsigned int dest)
{
unsigned long cfg;
cfg = __prepare_ICR(0, vector, dest);
/*
* send the IPI.
*/
x2apic_icr_write(cfg, apicid);
}
static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
{
unsigned long flags;
unsigned long query_cpu;
local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
static void x2apic_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
x2apic_send_IPI_mask(mask, vector);
}
static void x2apic_send_IPI_all(int vector)
{
x2apic_send_IPI_mask(cpu_online_map, vector);
}
static int x2apic_apic_id_registered(void)
{
return 1;
}
static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = first_cpu(cpumask);
if ((unsigned)cpu < NR_CPUS)
return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
id = x;
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
x = id;
return x;
}
static unsigned int x2apic_read_id(void)
{
return apic_read(APIC_ID);
}
static unsigned int phys_pkg_id(int index_msb)
{
return x2apic_read_id() >> index_msb;
}
void x2apic_send_IPI_self(int vector)
{
apic_write(APIC_SELF_IPI, vector);
}
void init_x2apic_ldr(void)
{
return;
}
struct genapic apic_x2apic_phys = {
.name = "physical x2apic",
.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = x2apic_target_cpus,
.vector_allocation_domain = x2apic_vector_allocation_domain,
.apic_id_registered = x2apic_apic_id_registered,
.init_apic_ldr = init_x2apic_ldr,
.send_IPI_all = x2apic_send_IPI_all,
.send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_self = x2apic_send_IPI_self,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFFFFFFFu),
};
......@@ -12,12 +12,12 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
......@@ -26,6 +26,35 @@
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
DEFINE_PER_CPU(int, x2apic_extra_bits);
static enum uv_system_type uv_system_type;
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
if (!strcmp(oem_id, "SGI")) {
if (!strcmp(oem_table_id, "UVL"))
uv_system_type = UV_LEGACY_APIC;
else if (!strcmp(oem_table_id, "UVX"))
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH")) {
uv_system_type = UV_NON_UNIQUE_APIC;
return 1;
}
}
return 0;
}
enum uv_system_type get_uv_system_type(void)
{
return uv_system_type;
}
int is_uv_system(void)
{
return uv_system_type != UV_NONE;
}
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
......@@ -123,6 +152,10 @@ static int uv_apic_id_registered(void)
return 1;
}
static void uv_init_apic_ldr(void)
{
}
static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
......@@ -138,9 +171,34 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x)
{
unsigned int id;
WARN_ON(preemptible() && num_online_cpus() > 1);
id = x | __get_cpu_var(x2apic_extra_bits);
return id;
}
static unsigned long set_apic_id(unsigned int id)
{
unsigned long x;
/* mask out x2apic_extra_bits? */
x = id;
return x;
}
static unsigned int uv_read_apic_id(void)
{
return get_apic_id(apic_read(APIC_ID));
}
static unsigned int phys_pkg_id(int index_msb)
{
return GET_APIC_ID(read_apic_id()) >> index_msb;
return uv_read_apic_id() >> index_msb;
}
#ifdef ZZZ /* Needs x2apic patch */
......@@ -152,17 +210,22 @@ static void uv_send_IPI_self(int vector)
struct genapic apic_x2apic_uv_x = {
.name = "UV large system",
.acpi_madt_oem_check = uv_acpi_madt_oem_check,
.int_delivery_mode = dest_Fixed,
.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
.target_cpus = uv_target_cpus,
.vector_allocation_domain = uv_vector_allocation_domain,/* Fixme ZZZ */
.apic_id_registered = uv_apic_id_registered,
.init_apic_ldr = uv_init_apic_ldr,
.send_IPI_all = uv_send_IPI_all,
.send_IPI_allbutself = uv_send_IPI_allbutself,
.send_IPI_mask = uv_send_IPI_mask,
/* ZZZ.send_IPI_self = uv_send_IPI_self, */
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.phys_pkg_id = phys_pkg_id, /* Fixme ZZZ */
.get_apic_id = get_apic_id,
.set_apic_id = set_apic_id,
.apic_id_mask = (0xFFFFFFFFu),
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
......@@ -401,3 +464,5 @@ void __cpuinit uv_cpu_init(void)
if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
set_x2apic_extra_bits(uv_hub_info->pnode);
}
......@@ -21,9 +21,12 @@
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
# define save_i387_ia32 save_i387
# define restore_i387_ia32 restore_i387
# define save_i387_xstate_ia32 save_i387_xstate
# define restore_i387_xstate_ia32 restore_i387_xstate
# define _fpstate_ia32 _fpstate
# define _xstate_ia32 _xstate
# define sig_xstate_ia32_size sig_xstate_size
# define fx_sw_reserved_ia32 fx_sw_reserved
# define user_i387_ia32_struct user_i387_struct
# define user32_fxsr_struct user_fxsr_struct
#endif
......@@ -36,6 +39,7 @@
static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;
void __cpuinit mxcsr_feature_mask_init(void)
......@@ -61,6 +65,11 @@ void __init init_thread_xstate(void)
return;
}
if (cpu_has_xsave) {
xsave_cntxt_init();
return;
}
if (cpu_has_fxsr)
xstate_size = sizeof(struct i387_fxsave_struct);
#ifdef CONFIG_X86_32
......@@ -83,9 +92,19 @@ void __cpuinit fpu_init(void)
write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */
/*
* The boot processor sets up the FP and extended state context info.
*/
if (!smp_processor_id())
init_thread_xstate();
xsave_init();
mxcsr_feature_mask_init();
/* clean state in init */
current_thread_info()->status = 0;
if (cpu_has_xsave)
current_thread_info()->status = TS_XSAVE;
else
current_thread_info()->status = 0;
clear_used_math();
}
#endif /* CONFIG_X86_64 */
......@@ -195,6 +214,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
*/
target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (cpu_has_xsave)
target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
return ret;
}
......@@ -395,6 +421,12 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!ret)
convert_to_fxsr(target, &env);
/*
* update the header bit in the xsave header, indicating the
* presence of FP.
*/
if (cpu_has_xsave)
target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
return ret;
}
......@@ -407,7 +439,6 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
struct task_struct *tsk = current;
struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
unlazy_fpu(tsk);
fp->status = fp->swd;
if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
return -1;
......@@ -421,8 +452,6 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
struct user_i387_ia32_struct env;
int err = 0;
unlazy_fpu(tsk);
convert_from_fxsr(&env, tsk);
if (__copy_to_user(buf, &env, sizeof(env)))
return -1;
......@@ -432,16 +461,40 @@ static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
if (err)
return -1;
if (__copy_to_user(&buf->_fxsr_env[0], fx,
sizeof(struct i387_fxsave_struct)))
if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
return -1;
return 1;
}
int save_i387_ia32(struct _fpstate_ia32 __user *buf)
static int save_i387_xsave(void __user *buf)
{
struct _fpstate_ia32 __user *fx = buf;
int err = 0;
if (save_i387_fxsave(fx) < 0)
return -1;
err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
sizeof(struct _fpx_sw_bytes));
err |= __put_user(FP_XSTATE_MAGIC2,
(__u32 __user *) (buf + sig_xstate_ia32_size
- FP_XSTATE_MAGIC2_SIZE));
if (err)
return -1;
return 1;
}
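The sw_reserved bytes and the trailing FP_XSTATE_MAGIC2 written above are what the restore side later validates via check_for_xstate() (defined in xsave.c, outside this hunk). A simplified sketch of those checks, under the assumption that the real helper does little more than this:
/* Simplified sketch of the restore-side sanity checks: magic1 and the
 * recorded sizes must be plausible, and the user buffer must end with
 * FP_XSTATE_MAGIC2 right where xstate_size says it should. */
static int check_for_xstate_sketch(struct i387_fxsave_struct __user *fx,
				   void __user *buf,
				   struct _fpx_sw_bytes *fx_sw)
{
	__u32 magic2;

	if (__copy_from_user(fx_sw, &fx->sw_reserved[0], sizeof(*fx_sw)))
		return -1;
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;
	if (__get_user(magic2, (__u32 __user *)(buf + fx_sw->xstate_size)) ||
	    magic2 != FP_XSTATE_MAGIC2)
		return -1;
	return 0;
}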
int save_i387_xstate_ia32(void __user *buf)
{
struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
struct task_struct *tsk = current;
if (!used_math())
return 0;
if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
return -EACCES;
/*
* This will cause a "finit" to be triggered by the next
* attempted FPU operation by the 'current' process.
......@@ -451,13 +504,17 @@ int save_i387_ia32(struct _fpstate_ia32 __user *buf)
if (!HAVE_HWFP) {
return fpregs_soft_get(current, NULL,
0, sizeof(struct user_i387_ia32_struct),
NULL, buf) ? -1 : 1;
NULL, fp) ? -1 : 1;
}
unlazy_fpu(tsk);
if (cpu_has_xsave)
return save_i387_xsave(fp);
if (cpu_has_fxsr)
return save_i387_fxsave(buf);
return save_i387_fxsave(fp);
else
return save_i387_fsave(buf);
return save_i387_fsave(fp);
}
static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
......@@ -468,14 +525,15 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
sizeof(struct i387_fsave_struct));
}
static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
unsigned int size)
{
struct task_struct *tsk = current;
struct user_i387_ia32_struct env;
int err;
err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
sizeof(struct i387_fxsave_struct));
size);
/* mxcsr reserved bits must be masked to zero for security reasons */
tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
if (err || __copy_from_user(&env, buf, sizeof(env)))
......@@ -485,14 +543,69 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
return 0;
}
int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
static int restore_i387_xsave(void __user *buf)
{
struct _fpx_sw_bytes fx_sw_user;
struct _fpstate_ia32 __user *fx_user =
((struct _fpstate_ia32 __user *) buf);
struct i387_fxsave_struct __user *fx =
(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
struct xsave_hdr_struct *xsave_hdr =
&current->thread.xstate->xsave.xsave_hdr;
u64 mask;
int err;
if (check_for_xstate(fx, buf, &fx_sw_user))
goto fx_only;
mask = fx_sw_user.xstate_bv;
err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);
xsave_hdr->xstate_bv &= pcntxt_mask;
/*
* These bits must be zero.
*/
xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
/*
* Init the state that is enabled by the OS but not present
* in the memory layout.
*/
mask = ~(pcntxt_mask & ~mask);
xsave_hdr->xstate_bv &= mask;
return err;
fx_only:
/*
* Couldn't find the extended state information in the memory
* layout. Restore the FP/SSE and init the other extended state
* enabled by the OS.
*/
xsave_hdr->xstate_bv = XSTATE_FPSSE;
return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
}
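To make the masking above concrete, a worked example with illustrative bit values:
/*
 * Worked example (illustrative bits only):
 *
 *   pcntxt_mask (enabled by the OS)      = 0b111
 *   fx_sw_user.xstate_bv (in the buffer) = 0b011  (FP and SSE only)
 *
 *   pcntxt_mask & ~mask = 0b100   -> enabled but not supplied by user
 *   mask = ~0b100                 -> clears exactly that bit
 *   xstate_bv &= mask             -> bit 2 state reverts to init,
 *                                    FP/SSE restored from the buffer
 */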
int restore_i387_xstate_ia32(void __user *buf)
{
int err;
struct task_struct *tsk = current;
struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
if (HAVE_HWFP)
clear_fpu(tsk);
if (!buf) {
if (used_math()) {
clear_fpu(tsk);
clear_used_math();
}
return 0;
} else
if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
return -EACCES;
if (!used_math()) {
err = init_fpu(tsk);
if (err)
......@@ -500,14 +613,17 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
}
if (HAVE_HWFP) {
if (cpu_has_fxsr)
err = restore_i387_fxsave(buf);
if (cpu_has_xsave)
err = restore_i387_xsave(buf);
else if (cpu_has_fxsr)
err = restore_i387_fxsave(fp, sizeof(struct
i387_fxsave_struct));
else
err = restore_i387_fsave(buf);
err = restore_i387_fsave(fp);
} else {
err = fpregs_soft_set(current, NULL,
0, sizeof(struct user_i387_ia32_struct),
NULL, buf) != 0;
NULL, fp) != 0;
}
set_used_math();
......
......@@ -282,6 +282,30 @@ static int __init i8259A_init_sysfs(void)
device_initcall(i8259A_init_sysfs);
void mask_8259A(void)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
spin_unlock_irqrestore(&i8259A_lock, flags);
}
void unmask_8259A(void)
{
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
spin_unlock_irqrestore(&i8259A_lock, flags);
}
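mask_8259A()/unmask_8259A() give callers a way to quiesce the legacy PIC while the interrupt delivery path is reprogrammed (in this series, the interrupt-remapping/x2apic bring-up). An illustrative call pattern; the surrounding function is invented:
/* Illustrative only: quiesce the PIC, reconfigure delivery, restore
 * the cached IMR masks. */
static void reprogram_irq_delivery_sketch(void)
{
	mask_8259A();	/* writes 0xff to both IMRs under i8259A_lock */
	/* ... switch IO-APIC/LAPIC into the new mode here ... */
	unmask_8259A();	/* restores cached_master_mask/cached_slave_mask */
}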
void init_8259A(int auto_eoi)
{
unsigned long flags;
......
......@@ -46,6 +46,7 @@
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
......@@ -1490,7 +1491,7 @@ void /*__init*/ print_local_APIC(void *dummy)
smp_processor_id(), hard_smp_processor_id());
v = apic_read(APIC_ID);
printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
GET_APIC_ID(read_apic_id()));
GET_APIC_ID(v));
v = apic_read(APIC_LVR);
printk(KERN_INFO "... APIC VERSION: %08x\n", v);
ver = GET_APIC_VERSION(v);
......@@ -1698,8 +1699,7 @@ void disable_IO_APIC(void)
entry.dest_mode = 0; /* Physical */
entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
entry.dest.physical.physical_dest =
GET_APIC_ID(read_apic_id());
entry.dest.physical.physical_dest = read_apic_id();
/*
* Add it to the IO-APIC irq-routing table:
......@@ -1725,10 +1725,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
unsigned char old_id;
unsigned long flags;
#ifdef CONFIG_X86_NUMAQ
if (found_numaq)
if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
return;
#endif
/*
* Don't check I/O APIC IDs for xAPIC systems. They have
......
......@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/syscalls.h>
#include <asm/syscalls.h>
/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
static void set_bitmap(unsigned long *bitmap, unsigned int base,
......
......@@ -20,6 +20,8 @@
#ifdef CONFIG_X86_32
#include <mach_apic.h>
#include <mach_ipi.h>
/*
* the following functions deal with sending IPIs between CPUs.
*
......@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
}
/* must come after the send_IPI functions above for inlining */
#include <mach_ipi.h>
static int convert_apicid_to_cpu(int apic_id)
{
int i;
......
......@@ -325,7 +325,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, " Function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
......
......@@ -129,7 +129,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, " Function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
......
......@@ -18,6 +18,7 @@
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#ifdef CONFIG_SMP
static void flush_ldt(void *current_mm)
......
......@@ -397,7 +397,9 @@ static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
generic_bigsmp_probe();
#endif
#ifdef CONFIG_X86_32
setup_apic_routing();
#endif
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
......
......@@ -229,6 +229,12 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
}
}
static int __init numaq_setup_ioapic_ids(void)
{
/* so can skip it */
return 1;
}
static struct x86_quirks numaq_x86_quirks __initdata = {
.arch_pre_time_init = numaq_pre_time_init,
.arch_time_init = NULL,
......@@ -243,6 +249,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
.mpc_oem_bus_info = mpc_oem_bus_info,
.mpc_oem_pci_bus = mpc_oem_pci_bus,
.smp_read_mpc_oem = smp_read_mpc_oem,
.setup_ioapic_ids = numaq_setup_ioapic_ids,
};
void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
......
......@@ -373,8 +373,6 @@ struct pv_cpu_ops pv_cpu_ops = {
struct pv_apic_ops pv_apic_ops = {
#ifdef CONFIG_X86_LOCAL_APIC
.apic_write = native_apic_write,
.apic_read = native_apic_read,
.setup_boot_clock = setup_boot_APIC_clock,
.setup_secondary_clock = setup_secondary_APIC_clock,
.startup_ipi_hook = paravirt_nop,
......
......@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
start = start_##ops##_##x; \
end = end_##ops##_##x; \
goto patch_site
switch(type) {
switch (type) {
PATCH_SITE(pv_irq_ops, irq_disable);
PATCH_SITE(pv_irq_ops, irq_enable);
PATCH_SITE(pv_irq_ops, restore_fl);
......
......@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void)
* using 512M as goal
*/
align = 64ULL<<20;
size = round_up(dma32_bootmem_size, align);
size = roundup(dma32_bootmem_size, align);
dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
512ULL<<20);
if (dma32_bootmem_ptr)
......
......@@ -55,6 +55,8 @@
#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
......
......@@ -37,11 +37,11 @@
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
......@@ -51,6 +51,7 @@
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
asmlinkage extern void ret_from_fork(void);
......@@ -88,7 +89,7 @@ void exit_idle(void)
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);
#include <asm/nmi.h>
#include <linux/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
......@@ -151,7 +152,7 @@ void cpu_idle(void)
}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
void __show_regs(struct pt_regs *regs)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
......@@ -160,59 +161,61 @@ void __show_regs(struct pt_regs * regs)
printk("\n");
print_modules();
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
current->pid, current->comm, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
printk_address(regs->ip, 1);
printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
regs->flags);
printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
regs->sp, regs->flags);
printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->ax, regs->bx, regs->cx);
printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
regs->dx, regs->si, regs->di);
printk("RBP: %016lx R08: %016lx R09: %016lx\n",
printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
regs->bp, regs->r8, regs->r9);
printk("R10: %016lx R11: %016lx R12: %016lx\n",
regs->r10, regs->r11, regs->r12);
printk("R13: %016lx R14: %016lx R15: %016lx\n",
regs->r13, regs->r14, regs->r15);
asm("movl %%ds,%0" : "=r" (ds));
asm("movl %%cs,%0" : "=r" (cs));
asm("movl %%es,%0" : "=r" (es));
printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
regs->r10, regs->r11, regs->r12);
printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
regs->r13, regs->r14, regs->r15);
asm("movl %%ds,%0" : "=r" (ds));
asm("movl %%cs,%0" : "=r" (cs));
asm("movl %%es,%0" : "=r" (es));
asm("movl %%fs,%0" : "=r" (fsindex));
asm("movl %%gs,%0" : "=r" (gsindex));
rdmsrl(MSR_FS_BASE, fs);
rdmsrl(MSR_GS_BASE, gs);
rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
rdmsrl(MSR_GS_BASE, gs);
rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
cr4 = read_cr4();
printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
fs,fsindex,gs,gsindex,shadowgs);
printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
fs, fsindex, gs, gsindex, shadowgs);
printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
es, cr0);
printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
get_debugreg(d2, 2);
printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
get_debugreg(d3, 3);
get_debugreg(d6, 6);
get_debugreg(d7, 7);
printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
void show_regs(struct pt_regs *regs)
{
printk("CPU %d:", smp_processor_id());
printk(KERN_INFO "CPU %d:", smp_processor_id());
__show_regs(regs);
show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}
......@@ -313,10 +316,10 @@ void prepare_to_copy(struct task_struct *tsk)
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
struct task_struct *p, struct pt_regs *regs)
{
int err;
struct pt_regs * childregs;
struct pt_regs *childregs;
struct task_struct *me = current;
childregs = ((struct pt_regs *)
......@@ -361,10 +364,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
if (test_thread_flag(TIF_IA32))
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
else
#endif
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
if (err)
else
#endif
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
if (err)
goto out;
}
err = 0;
......@@ -543,7 +546,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
unsigned fsindex, gsindex;
/* we're going to use this soon, after a few expensive things */
if (next_p->fpu_counter>5)
if (next_p->fpu_counter > 5)
prefetch(next->xstate);
/*
......@@ -551,13 +554,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
load_sp0(tss, next);
/*
/*
* Switch DS and ES.
* This won't pick up thread selector changes, but I guess that is ok.
*/
savesegment(es, prev->es);
if (unlikely(next->es | prev->es))
loadsegment(es, next->es);
loadsegment(es, next->es);
savesegment(ds, prev->ds);
if (unlikely(next->ds | prev->ds))
......@@ -583,7 +586,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
arch_leave_lazy_cpu_mode();
/*
/*
* Switch FS and GS.
*
* Segment register != 0 always requires a reload. Also
......@@ -592,13 +595,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
if (unlikely(fsindex | next->fsindex | prev->fs)) {
loadsegment(fs, next->fsindex);
/*
/*
* Check if the user used a selector != 0; if yes
* clear 64bit base, since overloaded base is always
* mapped to the Null selector
*/
if (fsindex)
prev->fs = 0;
prev->fs = 0;
}
/* when next process has a 64bit base use it */
if (next->fs)
......@@ -608,7 +611,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (unlikely(gsindex | next->gsindex | prev->gs)) {
load_gs_index(next->gsindex);
if (gsindex)
prev->gs = 0;
prev->gs = 0;
}
if (next->gs)
wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
......@@ -617,12 +620,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Must be after DS reload */
unlazy_fpu(prev_p);
/*
/*
* Switch the PDA and FPU contexts.
*/
prev->usersp = read_pda(oldrsp);
write_pda(oldrsp, next->usersp);
write_pda(pcurrent, next_p);
write_pda(pcurrent, next_p);
write_pda(kernelstack,
(unsigned long)task_stack_page(next_p) +
......@@ -663,7 +666,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
char __user * __user *envp, struct pt_regs *regs)
{
long error;
char * filename;
char *filename;
filename = getname(name);
error = PTR_ERR(filename);
......@@ -721,55 +724,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long stack;
u64 fp,ip;
u64 fp, ip;
int count = 0;
if (!p || p == current || p->state==TASK_RUNNING)
return 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack = (unsigned long)task_stack_page(p);
if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
return 0;
fp = *(u64 *)(p->thread.sp);
do {
do {
if (fp < (unsigned long)stack ||
fp > (unsigned long)stack+THREAD_SIZE)
return 0;
return 0;
ip = *(u64 *)(fp+8);
if (!in_sched_functions(ip))
return ip;
fp = *(u64 *)fp;
} while (count++ < 16);
fp = *(u64 *)fp;
} while (count++ < 16);
return 0;
}
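A reference note on the frame layout the loop above assumes:
/* get_wchan() assumes the standard frame-pointer chain:
 *
 *   fp     -> saved caller %rbp (the next frame)
 *   fp + 8 -> return address (the ip tested by in_sched_functions())
 *
 * The walk starts from *(u64 *)p->thread.sp, stays within the task's
 * stack, and gives up after 16 frames.
 */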
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
int ret = 0;
{
int ret = 0;
int doit = task == current;
int cpu;
switch (code) {
switch (code) {
case ARCH_SET_GS:
if (addr >= TASK_SIZE_OF(task))
return -EPERM;
return -EPERM;
cpu = get_cpu();
/* handle small bases via the GDT because that's faster to
/* handle small bases via the GDT because that's faster to
switch. */
if (addr <= 0xffffffff) {
set_32bit_tls(task, GS_TLS, addr);
if (doit) {
if (addr <= 0xffffffff) {
set_32bit_tls(task, GS_TLS, addr);
if (doit) {
load_TLS(&task->thread, cpu);
load_gs_index(GS_TLS_SEL);
load_gs_index(GS_TLS_SEL);
}
task->thread.gsindex = GS_TLS_SEL;
task->thread.gsindex = GS_TLS_SEL;
task->thread.gs = 0;
} else {
} else {
task->thread.gsindex = 0;
task->thread.gs = addr;
if (doit) {
load_gs_index(0);
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
}
}
}
put_cpu();
break;
......@@ -823,8 +826,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
rdmsrl(MSR_KERNEL_GS_BASE, base);
else
base = task->thread.gs;
}
else
} else
base = task->thread.gs;
ret = put_user(base, (unsigned long __user *)addr);
break;
......
......@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
......@@ -69,7 +70,7 @@ static inline bool invalid_selector(u16 value)
#define FLAG_MASK FLAG_MASK_32
static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
regno >>= 2;
......@@ -1375,30 +1376,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
force_sig_info(SIGTRAP, &info, tsk);
}
static void syscall_trace(struct pt_regs *regs)
{
if (!(current->ptrace & PT_PTRACED))
return;
#if 0
printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
current->comm,
regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
current_thread_info()->flags, current->ptrace);
#endif
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
#ifdef CONFIG_X86_32
# define IS_IA32 1
......@@ -1432,8 +1409,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
ret = -1L;
if (ret || test_thread_flag(TIF_SYSCALL_TRACE))
syscall_trace(regs);
if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
tracehook_report_syscall_entry(regs))
ret = -1L;
if (unlikely(current->audit_context)) {
if (IS_IA32)
......@@ -1459,7 +1437,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (test_thread_flag(TIF_SYSCALL_TRACE))
syscall_trace(regs);
tracehook_report_syscall_exit(regs, 0);
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
......@@ -1475,6 +1453,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
* system call instruction.
*/
if (test_thread_flag(TIF_SINGLESTEP) &&
(current->ptrace & PT_PTRACED))
tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
send_sigtrap(current, regs, 0);
}
......@@ -742,6 +742,8 @@ void __init setup_arch(char **cmdline_p)
#else
num_physpages = max_pfn;
if (cpu_has_x2apic)
check_x2apic();
/* How many end-of-memory variables you have, grandma! */
/* need this before calling reserve_initrd */
......
......@@ -3,9 +3,18 @@ struct sigframe {
char __user *pretcode;
int sig;
struct sigcontext sc;
struct _fpstate fpstate;
/*
 * fpstate is unused. fpstate is moved/allocated after
 * retcode[] below, which lets the FP state and future state
 * extensions (xsave) stay together.
 * At the same time, retaining the unused fpstate keeps the offset
 * of extramask[] in the sigframe unchanged, so legacy applications
 * that access/modify it keep working.
 */
struct _fpstate fpstate_unused;
unsigned long extramask[_NSIG_WORDS-1];
char retcode[8];
/* fp state follows here */
};
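With the fixed-size member gone, the FP/extended state area sits past the end of the struct. A minimal sketch of locating it, assuming a struct sigframe __user *frame (illustrative; the real frame-setup code places the area with alignment, so padding may precede it):
/* Illustrative helper only: the FP/xstate area begins just past
 * retcode[], i.e. beyond the end of struct sigframe (modulo the
 * alignment the frame-setup code applies). */
static inline void __user *sigframe_fpstate(struct sigframe __user *frame)
{
	return (void __user *)(frame + 1);
}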
struct rt_sigframe {
......@@ -15,13 +24,19 @@ struct rt_sigframe {
void __user *puc;
struct siginfo info;
struct ucontext uc;
struct _fpstate fpstate;
char retcode[8];
/* fp state follows here */
};
#else
struct rt_sigframe {
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
/* fp state follows here */
};
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs);
#endif
......@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <asm/io.h>
#include <asm/bios_ebda.h>
#include <asm/mach-summit/mach_mpparse.h>
#include <asm/summit/mpparse.h>
static struct rio_table_hdr *rio_table_hdr __initdata;
static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
......
......@@ -22,6 +22,8 @@
#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <asm/syscalls.h>
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
......
......@@ -8,12 +8,12 @@
#define __NO_STUBS
#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
#undef _ASM_X86_64_UNISTD_H_
#undef ASM_X86__UNISTD_64_H
#include <asm/unistd_64.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
#undef _ASM_X86_64_UNISTD_H_
#undef ASM_X86__UNISTD_64_H
typedef void (*sys_call_ptr_t)(void);
......
......@@ -36,6 +36,7 @@
#include <asm/arch_hooks.h>
#include <asm/hpet.h>
#include <asm/time.h>
#include <asm/timer.h>
#include "do_timer.h"
......
......@@ -10,6 +10,7 @@
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include "tls.h"
......
......@@ -1228,7 +1228,6 @@ void __init trap_init(void)
set_bit(SYSCALL_VECTOR, used_vectors);
init_thread_xstate();
/*
* Should be a barrier for any external CPU state:
*/
......
......@@ -46,6 +46,7 @@
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/syscalls.h>
/*
* Known problems:
......
......@@ -905,8 +905,8 @@ static inline int __init activate_vmi(void)
#endif
#ifdef CONFIG_X86_LOCAL_APIC
para_fill(pv_apic_ops.apic_read, APICRead);
para_fill(pv_apic_ops.apic_write, APICWrite);
para_fill(apic_ops->read, APICRead);
para_fill(apic_ops->write, APICWrite);
#endif
/*
......
......@@ -9,4 +9,4 @@ obj-$(CONFIG_X86_NUMAQ) += numaq.o
obj-$(CONFIG_X86_SUMMIT) += summit.o
obj-$(CONFIG_X86_BIGSMP) += bigsmp.o
obj-$(CONFIG_X86_ES7000) += es7000.o
obj-$(CONFIG_X86_ES7000) += ../../x86/mach-es7000/
obj-$(CONFIG_X86_ES7000) += ../../x86/es7000/