Commit a3eeeefb authored by Ingo Molnar

Merge branch 'x86/tsc' into tracing/core

Merge it to resolve this incidental conflict between the BTS fixes/cleanups
and changes in x86/tsc:

Conflicts:
	arch/x86/kernel/cpu/intel.c
Parents: 30cd324e 7e3cbc3f
arch/x86/include/asm/cpufeature.h

@@ -80,7 +80,6 @@
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32	(3*32+14) /* "" syscall in ia32 userspace */
@@ -92,6 +91,8 @@
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */

 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
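A note on the (w*32+b) encoding used by these defines: the kernel keeps per-CPU feature flags in an array of 32-bit words (x86_capability), and each X86_FEATURE_* constant packs the word index and the bit position into one integer. A minimal user-space sketch of the decode, purely illustrative (the constant is copied from the header; the word/bit split follows from the convention, not from this patch):

	#include <stdio.h>

	#define X86_FEATURE_NONSTOP_TSC (3*32+24)	/* as defined above */

	int main(void)
	{
		unsigned int f = X86_FEATURE_NONSTOP_TSC;
		/* boot_cpu_has()-style lookup: word f/32, bit f%32 */
		printf("word %u, bit %u\n", f / 32, f % 32);	/* -> word 3, bit 24 */
		return 0;
	}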
@@ -117,6 +118,7 @@
 #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */

 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE	(5*32+ 2) /* "rng" RNG present (xstore) */
@@ -237,6 +239,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)

 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
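Word 4 holds the CPUID leaf 1 ECX flags, so X86_FEATURE_HYPERVISOR (4*32+31) corresponds to ECX bit 31 of leaf 1, the bit hypervisors set to announce themselves. A user-space sketch of what cpu_has_hypervisor ends up testing, using GCC's <cpuid.h> (an illustration, not part of this patch):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return 1;
		/* ECX bit 31: running under a hypervisor */
		printf("hypervisor: %s\n", (ecx & (1u << 31)) ? "yes" : "no");
		return 0;
	}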
arch/x86/kernel/cpu/addon_cpuid_features.c

@@ -120,9 +120,17 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
						 & core_select_mask;
 	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	/*
+	 * Reinit the apicid, now that we have extended initial_apicid.
+	 */
+	c->apicid = phys_pkg_id(c->initial_apicid, 0);
 #else
 	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
 	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
+	/*
+	 * Reinit the apicid, now that we have extended initial_apicid.
+	 */
+	c->apicid = phys_pkg_id(0);
 #endif
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
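The decode above splits the extended APIC ID into SMT/core/package fields using the mask widths reported by CPUID leaf 0xB; phys_pkg_id(id, shift) reduces to a right shift here. A standalone sketch of the arithmetic, with made-up example values (the APIC ID and widths are hypothetical, chosen only to show the masking):

	#include <stdio.h>

	int main(void)
	{
		unsigned int apicid = 0x35;		/* hypothetical extended APIC ID */
		unsigned int ht_mask_width = 1;		/* SMT field width, from CPUID 0xb */
		unsigned int core_plus_mask_width = 4;	/* SMT+core field width */
		unsigned int core_select_mask =
			((1u << core_plus_mask_width) - 1) >> ht_mask_width;

		unsigned int smt_id  = apicid & ((1u << ht_mask_width) - 1);
		unsigned int core_id = (apicid >> ht_mask_width) & core_select_mask;
		unsigned int pkg_id  = apicid >> core_plus_mask_width;

		printf("smt %u, core %u, pkg %u\n", smt_id, core_id, pkg_id);
		return 0;
	}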
arch/x86/kernel/cpu/amd.c

@@ -283,9 +283,14 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);

-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
+	/*
+	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+	 * with P/T states and does not stop in deep C-states
+	 */
+	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+	}

 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
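c->x86_power is a cached copy of CPUID 0x8000_0007 EDX; bit 8 is the invariant-TSC bit. A minimal user-space check of the same bit, for illustration only (uses GCC's <cpuid.h>; __get_cpuid validates the maximum extended leaf itself):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
			return 1;
		/* EDX bit 8: TSC rate is invariant and the counter does not stop */
		printf("invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
		return 0;
	}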
arch/x86/kernel/cpu/intel.c

@@ -40,6 +40,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 		c->x86_cache_alignment = 128;
 #endif
+
+	/*
+	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+	 * with P/T states and does not stop in deep C-states
+	 */
+	if (c->x86_power & (1 << 8)) {
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+	}
+
 }

 #ifdef CONFIG_X86_32
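For context, the counter these flags describe is the one read by RDTSC; only when NONSTOP_TSC holds do deltas taken across an idle period stay meaningful. A trivial sketch using the compiler intrinsic (illustration only, not part of the patch):

	#include <stdio.h>
	#include <x86intrin.h>

	int main(void)
	{
		unsigned long long t0 = __rdtsc();
		/* ... some work, or an idle period ... */
		unsigned long long t1 = __rdtsc();

		printf("TSC delta: %llu cycles\n", t1 - t0);
		return 0;
	}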
@@ -241,6 +251,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 	intel_workarounds(c);

+	/*
+	 * Detect the extended topology information if available. This
+	 * will reinitialise the initial_apicid which will be used
+	 * in init_intel_cacheinfo()
+	 */
+	detect_extended_topology(c);
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -308,7 +325,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif

-	detect_extended_topology(c);
 	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
 		/*
 		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
arch/x86/kernel/process.c

@@ -286,7 +286,7 @@ static void c1e_idle(void)
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
 			c1e_detected = 1;
-			if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 				mark_tsc_unstable("TSC halt in AMD C1E");
 			printk(KERN_INFO "System has AMD C1E enabled\n");
 			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
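The switch from CONSTANT_TSC to NONSTOP_TSC matters because the two flags answer different questions: CONSTANT_TSC says the rate does not vary with P/T states, while NONSTOP_TSC says the counter keeps running in deep C-states such as C1E, so only the latter is the right guard here. A one-function sketch of the distinction (semantics assumed from the comments in this patch):

	#include <stdio.h>

	/* CONSTANT_TSC only promises a fixed rate across P/T states;
	 * NONSTOP_TSC promises the counter keeps running in deep C-states. */
	static int tsc_halts_in_c1e(int nonstop_tsc)
	{
		return !nonstop_tsc;
	}

	int main(void)
	{
		printf("halts with only CONSTANT_TSC: %d\n", tsc_halts_in_c1e(0));
		printf("halts with NONSTOP_TSC:       %d\n", tsc_halts_in_c1e(1));
		return 0;
	}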
drivers/acpi/processor_idle.c

@@ -374,15 +374,15 @@ static int tsc_halts_in_c(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
+	case X86_VENDOR_INTEL:
 		/*
 		 * AMD Fam10h TSC will tick in all
 		 * C/P/S0/S1 states when this bit is set.
 		 */
-		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;
+
 		/*FALL THROUGH*/
-	case X86_VENDOR_INTEL:
-		/* Several cases known where TSC halts in C2 too */
 	default:
 		return state > ACPI_STATE_C1;
 	}
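Assembled from the hunk above, the post-change function reads as follows; Intel now shares the AMD path because early_init_intel() (above) sets NONSTOP_TSC from the same CPUID bit:

	static int tsc_halts_in_c(int state)
	{
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
		case X86_VENDOR_INTEL:
			/*
			 * AMD Fam10h TSC will tick in all
			 * C/P/S0/S1 states when this bit is set.
			 */
			if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				return 0;

			/*FALL THROUGH*/
		default:
			return state > ACPI_STATE_C1;
		}
	}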