Commit 93b894b6 authored by Ingo Molnar

Merge branch 'x86/cpu' into perf/core, to pick up dependency

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents d3aaf09f 1b74dde7
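
Note: the bulk of the diff below mechanically replaces printk(KERN_<LEVEL> ...) calls with the equivalent pr_<level>() helpers. As a minimal sketch (illustrative, not part of this diff), the helpers in include/linux/printk.h are thin wrappers that prepend the log level and the optional pr_fmt() prefix, so each conversion is a one-for-one replacement with identical output:

/* roughly how the helper is defined in <linux/printk.h> */
#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

printk(KERN_INFO "Initializing CPU#%d\n", cpu);  /* old form */
pr_info("Initializing CPU#%d\n", cpu);           /* new form, same output */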
@@ -256,7 +256,7 @@ extern int force_personality32;
 instruction set this CPU supports. This could be done in user space,
 but it's not easy, and we've already done it here. */
-#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
+#define ELF_HWCAP (boot_cpu_data.x86_capability[CPUID_1_EDX])
 /* This yields a string that ld.so will use to load implementation
 specific libraries for optimization. This is more specific in
...
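The ELF_HWCAP change above (and the matching x86_capability[] index changes near the end of the diff) is the dependency this merge picks up from the x86/cpu branch: the per-CPU capability words are now indexed by named enum constants instead of bare integers. A sketch of the assumed enum shape, per arch/x86/include/asm/cpufeature.h of this kernel (only the first entry matters for ELF_HWCAP; later entries are abbreviated):

enum cpuid_leafs {
	CPUID_1_EDX = 0,     /* CPUID(1).EDX - the word ELF_HWCAP exports */
	CPUID_8000_0001_EDX, /* further CPUID leaves follow */
	/* ... */
};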
@@ -117,7 +117,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 void (*f_vide)(void);
 u64 d, d2;
-printk(KERN_INFO "AMD K6 stepping B detected - ");
+pr_info("AMD K6 stepping B detected - ");
 /*
 * It looks like AMD fixed the 2.6.2 bug and improved indirect
@@ -133,10 +133,9 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 d = d2-d;
 if (d > 20*K6_BUG_LOOP)
-printk(KERN_CONT
-"system stability may be impaired when more than 32 MB are used.\n");
+pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 else
-printk(KERN_CONT "probably OK (after B9730xxxx).\n");
+pr_cont("probably OK (after B9730xxxx).\n");
 }
 /* K6 with old style WHCR */
@@ -154,7 +153,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 wbinvd();
 wrmsr(MSR_K6_WHCR, l, h);
 local_irq_restore(flags);
-printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+pr_info("Enabling old style K6 write allocation for %d Mb\n",
 mbytes);
 }
 return;
@@ -175,7 +174,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 wbinvd();
 wrmsr(MSR_K6_WHCR, l, h);
 local_irq_restore(flags);
-printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+pr_info("Enabling new style K6 write allocation for %d Mb\n",
 mbytes);
 }
@@ -202,7 +201,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 */
 if (c->x86_model >= 6 && c->x86_model <= 10) {
 if (!cpu_has(c, X86_FEATURE_XMM)) {
-printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+pr_info("Enabling disabled K7/SSE Support.\n");
 msr_clear_bit(MSR_K7_HWCR, 15);
 set_cpu_cap(c, X86_FEATURE_XMM);
 }
@@ -216,8 +215,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
 rdmsr(MSR_K7_CLK_CTL, l, h);
 if ((l & 0xfff00000) != 0x20000000) {
-printk(KERN_INFO
-"CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 l, ((l & 0x000fffff)|0x20000000));
 wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 }
@@ -485,7 +483,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 unsigned long pfn = tseg >> PAGE_SHIFT;
-printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+pr_debug("tseg: %010llx\n", tseg);
 if (pfn_range_is_mapped(pfn, pfn + 1))
 set_memory_4k((unsigned long)__va(tseg), 1);
 }
@@ -500,8 +498,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 rdmsrl(MSR_K7_HWCR, val);
 if (!(val & BIT(24)))
-printk(KERN_WARNING FW_BUG "TSC doesn't count "
-"with P0 frequency!\n");
+pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 }
 }
...
@@ -15,7 +15,7 @@ void __init check_bugs(void)
 {
 identify_boot_cpu();
 #if !defined(CONFIG_SMP)
-printk(KERN_INFO "CPU: ");
+pr_info("CPU: ");
 print_cpu_info(&boot_cpu_data);
 #endif
 alternative_instructions();
...
@@ -29,7 +29,7 @@ static void init_c3(struct cpuinfo_x86 *c)
 rdmsr(MSR_VIA_FCR, lo, hi);
 lo |= ACE_FCR; /* enable ACE unit */
 wrmsr(MSR_VIA_FCR, lo, hi);
-printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+pr_info("CPU: Enabled ACE h/w crypto\n");
 }
 /* enable RNG unit, if present and disabled */
@@ -37,7 +37,7 @@ static void init_c3(struct cpuinfo_x86 *c)
 rdmsr(MSR_VIA_RNG, lo, hi);
 lo |= RNG_ENABLE; /* enable RNG unit */
 wrmsr(MSR_VIA_RNG, lo, hi);
-printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+pr_info("CPU: Enabled h/w RNG\n");
 }
 /* store Centaur Extended Feature Flags as
@@ -130,7 +130,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
 name = "C6";
 fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
 fcr_clr = DPDC;
-printk(KERN_NOTICE "Disabling bugged TSC.\n");
+pr_notice("Disabling bugged TSC.\n");
 clear_cpu_cap(c, X86_FEATURE_TSC);
 break;
 case 8:
@@ -163,11 +163,11 @@ static void init_centaur(struct cpuinfo_x86 *c)
 newlo = (lo|fcr_set) & (~fcr_clr);
 if (newlo != lo) {
-printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+pr_info("Centaur FCR was 0x%X now 0x%X\n",
 lo, newlo);
 wrmsr(MSR_IDT_FCR1, newlo, hi);
 } else {
-printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+pr_info("Centaur FCR is 0x%X\n", lo);
 }
 /* Emulate MTRRs using Centaur's MCR. */
 set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
...
@@ -228,7 +228,7 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 lo |= 0x200000;
 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-printk(KERN_NOTICE "CPU serial number disabled.\n");
+pr_notice("CPU serial number disabled.\n");
 clear_cpu_cap(c, X86_FEATURE_PN);
 /* Disabling the serial number may affect the cpuid level */
@@ -329,8 +329,7 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 if (!warn)
 continue;
-printk(KERN_WARNING
-"CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
+pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
 x86_cap_flag(df->feature), df->level);
 }
 }
@@ -510,7 +509,7 @@ void detect_ht(struct cpuinfo_x86 *c)
 smp_num_siblings = (ebx & 0xff0000) >> 16;
 if (smp_num_siblings == 1) {
-printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
+pr_info_once("CPU0: Hyper-Threading is disabled\n");
 goto out;
 }
@@ -531,9 +530,9 @@ void detect_ht(struct cpuinfo_x86 *c)
 out:
 if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+pr_info("CPU: Physical Processor ID: %d\n",
 c->phys_proc_id);
-printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+pr_info("CPU: Processor Core ID: %d\n",
 c->cpu_core_id);
 printed = 1;
 }
@@ -559,8 +558,7 @@ static void get_cpu_vendor(struct cpuinfo_x86 *c)
 }
 }
-printk_once(KERN_ERR
-"CPU: vendor_id '%s' unknown, using generic init.\n" \
+pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
 "CPU: Your system may be unstable.\n", v);
 c->x86_vendor = X86_VENDOR_UNKNOWN;
@@ -760,7 +758,7 @@ void __init early_cpu_init(void)
 int count = 0;
 #ifdef CONFIG_PROCESSOR_SELECT
-printk(KERN_INFO "KERNEL supported cpus:\n");
+pr_info("KERNEL supported cpus:\n");
 #endif
 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
@@ -778,7 +776,7 @@ void __init early_cpu_init(void)
 for (j = 0; j < 2; j++) {
 if (!cpudev->c_ident[j])
 continue;
-printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
+pr_info(" %s %s\n", cpudev->c_vendor,
 cpudev->c_ident[j]);
 }
 }
@@ -1061,7 +1059,7 @@ static void __print_cpu_msr(void)
 for (index = index_min; index < index_max; index++) {
 if (rdmsrl_safe(index, &val))
 continue;
-printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+pr_info(" MSR%08x: %016llx\n", index, val);
 }
 }
 }
@@ -1100,19 +1098,19 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 }
 if (vendor && !strstr(c->x86_model_id, vendor))
-printk(KERN_CONT "%s ", vendor);
+pr_cont("%s ", vendor);
 if (c->x86_model_id[0])
-printk(KERN_CONT "%s", c->x86_model_id);
+pr_cont("%s", c->x86_model_id);
 else
-printk(KERN_CONT "%d86", c->x86);
+pr_cont("%d86", c->x86);
-printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 if (c->x86_mask || c->cpuid_level >= 0)
-printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
+pr_cont(", stepping: 0x%x)\n", c->x86_mask);
 else
-printk(KERN_CONT ")\n");
+pr_cont(")\n");
 print_cpu_msr(c);
 }
@@ -1438,7 +1436,7 @@ void cpu_init(void)
 show_ucode_info_early();
-printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+pr_info("Initializing CPU#%d\n", cpu);
 if (cpu_feature_enabled(X86_FEATURE_VME) ||
 cpu_has_tsc ||
...
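Several conversions above, notably in print_cpu_info(), use pr_cont(). As a small illustrative sketch of the semantics being relied on (not part of the diff): pr_cont() carries no log level of its own and appends to the record opened by the previous printk()/pr_*() call, which is why the vendor, model, family and stepping still come out as a single line:

pr_info("CPU: ");                      /* opens the log record */
pr_cont("%s", c->x86_model_id);        /* appended to the same line */
pr_cont(" (family: 0x%x)\n", c->x86);  /* the '\n' terminates the record */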
@@ -103,7 +103,7 @@ static void check_cx686_slop(struct cpuinfo_x86 *c)
 local_irq_restore(flags);
 if (ccr5 & 2) { /* possible wrong calibration done */
-printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+pr_info("Recalibrating delay loop with SLOP bit reset\n");
 calibrate_delay();
 c->loops_per_jiffy = loops_per_jiffy;
 }
@@ -115,7 +115,7 @@ static void set_cx86_reorder(void)
 {
 u8 ccr3;
-printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
+pr_info("Enable Memory access reorder on Cyrix/NSC processor.\n");
 ccr3 = getCx86(CX86_CCR3);
 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
@@ -128,7 +128,7 @@ static void set_cx86_reorder(void)
 static void set_cx86_memwb(void)
 {
-printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 /* CCR2 bit 2: unlock NW bit */
 setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
@@ -268,7 +268,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 * VSA1 we work around however.
 */
-printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
+pr_info("Working around Cyrix MediaGX virtual DMA bugs.\n");
 isa_dma_bridge_buggy = 2;
 /* We do this before the PCI layer is running. However we
@@ -426,7 +426,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
 if (dir0 == 5 || dir0 == 3) {
 unsigned char ccr3;
 unsigned long flags;
-printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
+pr_info("Enabling CPUID on Cyrix processor.\n");
 local_irq_save(flags);
 ccr3 = getCx86(CX86_CCR3);
 /* enable MAPEN */
...
@@ -56,7 +56,7 @@ detect_hypervisor_vendor(void)
 }
 if (max_pri)
-printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
+pr_info("Hypervisor detected: %s\n", x86_hyper->name);
 }
 void init_hypervisor(struct cpuinfo_x86 *c)
...
@@ -61,7 +61,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 */
 if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
 c->microcode < 0x20e) {
-printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
 clear_cpu_cap(c, X86_FEATURE_PSE);
 }
@@ -140,7 +140,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
-printk(KERN_INFO "Disabled fast string operations\n");
+pr_info("Disabled fast string operations\n");
 setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
 setup_clear_cpu_cap(X86_FEATURE_ERMS);
 }
@@ -176,7 +176,7 @@ int ppro_with_ram_bug(void)
 boot_cpu_data.x86 == 6 &&
 boot_cpu_data.x86_model == 1 &&
 boot_cpu_data.x86_mask < 8) {
-printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
 return 1;
 }
 return 0;
@@ -225,7 +225,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 set_cpu_bug(c, X86_BUG_F00F);
 if (!f00f_workaround_enabled) {
-printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
 f00f_workaround_enabled = 1;
 }
 }
@@ -244,7 +244,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 * Forcefully enable PAE if kernel parameter "forcepae" is present.
 */
 if (forcepae) {
-printk(KERN_WARNING "PAE forced!\n");
+pr_warn("PAE forced!\n");
 set_cpu_cap(c, X86_FEATURE_PAE);
 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 }
...
@@ -444,7 +444,7 @@ static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
 err = amd_set_l3_disable_slot(nb, cpu, slot, val);
 if (err) {
 if (err == -EEXIST)
-pr_warning("L3 slot %d in use/index already disabled!\n",
+pr_warn("L3 slot %d in use/index already disabled!\n",
 slot);
 return err;
 }
...
@@ -115,7 +115,7 @@ static int raise_local(void)
 int cpu = m->extcpu;
 if (m->inject_flags & MCJ_EXCEPTION) {
-printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
+pr_info("Triggering MCE exception on CPU %d\n", cpu);
 switch (context) {
 case MCJ_CTX_IRQ:
 /*
@@ -128,15 +128,15 @@ static int raise_local(void)
 raise_exception(m, NULL);
 break;
 default:
-printk(KERN_INFO "Invalid MCE context\n");
+pr_info("Invalid MCE context\n");
 ret = -EINVAL;
 }
-printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
+pr_info("MCE exception done on CPU %d\n", cpu);
 } else if (m->status) {
-printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
+pr_info("Starting machine check poll CPU %d\n", cpu);
 raise_poll(m);
 mce_notify_irq();
-printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
+pr_info("Machine check poll done on CPU %d\n", cpu);
 } else
 m->finished = 0;
@@ -183,8 +183,7 @@ static void raise_mce(struct mce *m)
 start = jiffies;
 while (!cpumask_empty(mce_inject_cpumask)) {
 if (!time_before(jiffies, start + 2*HZ)) {
-printk(KERN_ERR
-"Timeout waiting for mce inject %lx\n",
+pr_err("Timeout waiting for mce inject %lx\n",
 *cpumask_bits(mce_inject_cpumask));
 break;
 }
@@ -241,7 +240,7 @@ static int inject_init(void)
 {
 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
 return -ENOMEM;
-printk(KERN_INFO "Machine check injector initialized\n");
+pr_info("Machine check injector initialized\n");
 register_mce_write_callback(mce_write);
 register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
 "mce_notify");
...
@@ -26,13 +26,11 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
 rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
 rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
-printk(KERN_EMERG
-"CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
+pr_emerg("CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
 smp_processor_id(), loaddr, lotype);
 if (lotype & (1<<5)) {
-printk(KERN_EMERG
-"CPU#%d: Possible thermal failure (CPU on fire ?).\n",
+pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n",
 smp_processor_id());
 }
@@ -61,12 +59,10 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
 /* Read registers before enabling: */
 rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
 rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
-printk(KERN_INFO
-"Intel old style machine check architecture supported.\n");
+pr_info("Intel old style machine check architecture supported.\n");
 /* Enable MCE: */
 cr4_set_bits(X86_CR4_MCE);
-printk(KERN_INFO
-"Intel old style machine check reporting enabled on CPU#%d.\n",
+pr_info("Intel old style machine check reporting enabled on CPU#%d.\n",
 smp_processor_id());
 }
@@ -190,7 +190,7 @@ static int therm_throt_process(bool new_event, int event, int level)
 /* if we just entered the thermal event */
 if (new_event) {
 if (event == THERMAL_THROTTLING_EVENT)
-printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
 this_cpu,
 level == CORE_LEVEL ? "Core" : "Package",
 state->count);
@@ -198,8 +198,7 @@ static int therm_throt_process(bool new_event, int event, int level)
 }
 if (old_event) {
 if (event == THERMAL_THROTTLING_EVENT)
-printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
-this_cpu,
+pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
 level == CORE_LEVEL ? "Core" : "Package");
 return 1;
 }
@@ -417,7 +416,7 @@ static void intel_thermal_interrupt(void)
 static void unexpected_thermal_interrupt(void)
 {
-printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
+pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
 smp_processor_id());
 }
@@ -499,7 +498,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
 if (system_state == SYSTEM_BOOTING)
-printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
 return;
 }
@@ -557,7 +556,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 l = apic_read(APIC_LVTTHMR);
 apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
+pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
 tm2 ? "TM2" : "TM1");
 /* enable thermal throttle processing */
...
@@ -12,7 +12,7 @@
 static void default_threshold_interrupt(void)
 {
-printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
+pr_err("Unexpected threshold interrupt at vector %x\n",
 THRESHOLD_APIC_VECTOR);
 }
...
@@ -17,7 +17,7 @@ static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
 ist_enter(regs);
-printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
+pr_emerg("CPU0: Machine Check Exception.\n");
 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 ist_exit(regs);
@@ -39,6 +39,5 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
 cr4_set_bits(X86_CR4_MCE);
-printk(KERN_INFO
-"Winchip machine check reporting enabled on CPU#0.\n");
+pr_info("Winchip machine check reporting enabled on CPU#0.\n");
 }
@@ -953,7 +953,7 @@ struct microcode_ops * __init init_amd_microcode(void)
 struct cpuinfo_x86 *c = &boot_cpu_data;
 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
 return NULL;
 }
...
@@ -161,7 +161,7 @@ static void __init ms_hyperv_init_platform(void)
 ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
 ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
-printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+pr_info("HyperV: features 0x%x, hints 0x%x\n",
 ms_hyperv.features, ms_hyperv.hints);
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -174,7 +174,7 @@ static void __init ms_hyperv_init_platform(void)
 rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
 hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
 lapic_timer_frequency = hv_lapic_frequency;
-printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
+pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
 lapic_timer_frequency);
 }
 #endif
...
@@ -103,7 +103,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
 */
 if (type != MTRR_TYPE_WRCOMB &&
 (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
-pr_warning("mtrr: only write-combining%s supported\n",
+pr_warn("mtrr: only write-combining%s supported\n",
 centaur_mcr_type ? " and uncacheable are" : " is");
 return -EINVAL;
 }
...
@@ -57,9 +57,9 @@ static int __initdata nr_range;
 static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
 static int __initdata debug_print;
-#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+#define Dprintk(x...) do { if (debug_print) pr_debug(x); } while (0)
-#define BIOS_BUG_MSG KERN_WARNING \
+#define BIOS_BUG_MSG \
 "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
 static int __init
@@ -81,9 +81,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
 base, base + size);
 }
 if (debug_print) {
-printk(KERN_DEBUG "After WB checking\n");
+pr_debug("After WB checking\n");
 for (i = 0; i < nr_range; i++)
-printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
 range[i].start, range[i].end);
 }
@@ -101,7 +101,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
 (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
 /* Var MTRR contains UC entry below 1M? Skip it: */
-printk(BIOS_BUG_MSG, i);
+pr_warn(BIOS_BUG_MSG, i);
 if (base + size <= (1<<(20-PAGE_SHIFT)))
 continue;
 size -= (1<<(20-PAGE_SHIFT)) - base;
@@ -114,11 +114,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
 extra_remove_base + extra_remove_size);
 if (debug_print) {
-printk(KERN_DEBUG "After UC checking\n");
+pr_debug("After UC checking\n");
 for (i = 0; i < RANGE_NUM; i++) {
 if (!range[i].end)
 continue;
-printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
 range[i].start, range[i].end);
 }
 }
@@ -126,9 +126,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
 /* sort the ranges */
 nr_range = clean_sort_range(range, RANGE_NUM);
 if (debug_print) {
-printk(KERN_DEBUG "After sorting\n");
+pr_debug("After sorting\n");
 for (i = 0; i < nr_range; i++)
-printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
 range[i].start, range[i].end);
 }
@@ -544,7 +544,7 @@ static void __init print_out_mtrr_range_state(void)
 start_base = to_size_factor(start_base, &start_factor),
 type = range_state[i].type;
-printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
 i, start_base, start_factor,
 size_base, size_factor,
 (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
@@ -713,7 +713,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 return 0;
 /* Print original var MTRRs at first, for debugging: */
-printk(KERN_DEBUG "original variable MTRRs\n");
+pr_debug("original variable MTRRs\n");
 print_out_mtrr_range_state();
 memset(range, 0, sizeof(range));
@@ -733,7 +733,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 x_remove_base, x_remove_size);
 range_sums = sum_ranges(range, nr_range);
-printk(KERN_INFO "total RAM covered: %ldM\n",
+pr_info("total RAM covered: %ldM\n",
 range_sums >> (20 - PAGE_SHIFT));
 if (mtrr_chunk_size && mtrr_gran_size) {
@@ -745,12 +745,11 @@ int __init mtrr_cleanup(unsigned address_bits)
 if (!result[i].bad) {
 set_var_mtrr_all(address_bits);
-printk(KERN_DEBUG "New variable MTRRs\n");
+pr_debug("New variable MTRRs\n");
 print_out_mtrr_range_state();
 return 1;
 }
-printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
-"will find optimal one\n");
+pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n");
 }
 i = 0;
@@ -768,7 +767,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 x_remove_base, x_remove_size, i);
 if (debug_print) {
 mtrr_print_out_one_result(i);
-printk(KERN_INFO "\n");
+pr_info("\n");
 }
 i++;
@@ -779,7 +778,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 index_good = mtrr_search_optimal_index();
 if (index_good != -1) {
-printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
+pr_info("Found optimal setting for mtrr clean up\n");
 i = index_good;
 mtrr_print_out_one_result(i);
@@ -790,7 +789,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 gran_size <<= 10;
 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
 set_var_mtrr_all(address_bits);
-printk(KERN_DEBUG "New variable MTRRs\n");
+pr_debug("New variable MTRRs\n");
 print_out_mtrr_range_state();
 return 1;
 } else {
@@ -799,8 +798,8 @@ int __init mtrr_cleanup(unsigned address_bits)
 mtrr_print_out_one_result(i);
 }
-printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
-printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
+pr_info("mtrr_cleanup: can not find optimal value\n");
+pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n");
 return 0;
 }
@@ -918,7 +917,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 /* kvm/qemu doesn't have mtrr set right, don't trim them all: */
 if (!highest_pfn) {
-printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
+pr_info("CPU MTRRs all blank - virtualized system.\n");
 return 0;
 }
@@ -973,7 +972,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 end_pfn);
 if (total_trim_size) {
-pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
+pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
+total_trim_size >> 20);
 if (!changed_by_mtrr_cleanup)
 WARN_ON(1);
...
@@ -55,7 +55,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 rdmsr(MSR_K8_SYSCFG, lo, hi);
 if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
-printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
+pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
 " not cleared by BIOS, clearing this bit\n",
 smp_processor_id());
 lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
@@ -501,14 +501,14 @@ void __init mtrr_state_warn(void)
 if (!mask)
 return;
 if (mask & MTRR_CHANGE_MASK_FIXED)
-pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
 if (mask & MTRR_CHANGE_MASK_VARIABLE)
-pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
+pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
-pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
-printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
-printk(KERN_INFO "mtrr: corrected configuration.\n");
+pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
+pr_info("mtrr: corrected configuration.\n");
 }
 /*
@@ -519,8 +519,7 @@ void __init mtrr_state_warn(void)
 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
 {
 if (wrmsr_safe(msr, a, b) < 0) {
-printk(KERN_ERR
-"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
+pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
 smp_processor_id(), msr, a, b);
 }
 }
@@ -607,7 +606,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 tmp |= ~((1ULL<<(hi - 1)) - 1);
 if (tmp != mask) {
-printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 mask = tmp;
 }
@@ -858,13 +857,13 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
 boot_cpu_data.x86_model == 1 &&
 boot_cpu_data.x86_mask <= 7) {
 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
-pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
 return -EINVAL;
 }
 if (!(base + size < 0x70000 || base > 0x7003F) &&
 (type == MTRR_TYPE_WRCOMB
 || type == MTRR_TYPE_WRBACK)) {
-pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
 return -EINVAL;
 }
 }
@@ -878,7 +877,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
 lbase = lbase >> 1, last = last >> 1)
 ;
 if (lbase != last) {
-pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
+pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
 return -EINVAL;
 }
 return 0;
...
@@ -300,24 +300,24 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 return error;
 if (type >= MTRR_NUM_TYPES) {
-pr_warning("mtrr: type: %u invalid\n", type);
+pr_warn("mtrr: type: %u invalid\n", type);
 return -EINVAL;
 }
 /* If the type is WC, check that this processor supports it */
 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
-pr_warning("mtrr: your processor doesn't support write-combining\n");
+pr_warn("mtrr: your processor doesn't support write-combining\n");
 return -ENOSYS;
 }
 if (!size) {
-pr_warning("mtrr: zero sized request\n");
+pr_warn("mtrr: zero sized request\n");
 return -EINVAL;
 }
 if ((base | (base + size - 1)) >>
 (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
-pr_warning("mtrr: base or size exceeds the MTRR width\n");
+pr_warn("mtrr: base or size exceeds the MTRR width\n");
 return -EINVAL;
 }
@@ -348,7 +348,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 } else if (types_compatible(type, ltype))
 continue;
 }
-pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing"
 " 0x%lx000,0x%lx000\n", base, size, lbase,
 lsize);
 goto out;
@@ -357,7 +357,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 if (ltype != type) {
 if (types_compatible(type, ltype))
 continue;
-pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
 base, size, mtrr_attrib_to_str(ltype),
 mtrr_attrib_to_str(type));
 goto out;
@@ -395,7 +395,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 static int mtrr_check(unsigned long base, unsigned long size)
 {
 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+pr_warn("mtrr: size and base must be multiples of 4 kiB\n");
 pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
 dump_stack();
 return -1;
@@ -493,16 +493,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 }
 }
 }
 if (reg >= max) {
-pr_warning("mtrr: register: %d too big\n", reg);
+pr_warn("mtrr: register: %d too big\n", reg);
 goto out;
 }
 mtrr_if->get(reg, &lbase, &lsize, &ltype);
 if (lsize < 1) {
-pr_warning("mtrr: MTRR %d not used\n", reg);
+pr_warn("mtrr: MTRR %d not used\n", reg);
 goto out;
 }
 if (mtrr_usage_table[reg] < 1) {
-pr_warning("mtrr: reg: %d has count=0\n", reg);
+pr_warn("mtrr: reg: %d has count=0\n", reg);
 goto out;
 }
 if (--mtrr_usage_table[reg] < 1)
...
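A recurring pattern in the two mtrr files above is pr_warning() becoming pr_warn(). Functionally this is a pure rename: in the printk.h of this era the two spellings are aliases, with pr_warn() being the preferred one. A rough sketch of the assumed definitions (illustrative, not part of the diff):

#define pr_warning(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn pr_warning   /* pr_warn() is the canonical spelling */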
@@ -254,15 +254,16 @@ static bool check_hw_exists(void)
 * We still allow the PMU driver to operate:
 */
 if (bios_fail) {
-printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
-printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
+pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
+pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
+reg_fail, val_fail);
 }
 return true;
 msr_fail:
-printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+pr_cont("Broken PMU hardware detected, using software events only.\n");
+pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
 boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
 reg, val_new);
...
@@ -670,7 +670,7 @@ static __init int perf_event_ibs_init(void)
 perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
 register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
-printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
+pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
 return 0;
 }
@@ -774,14 +774,14 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
 pci_dev_put(cpu_cfg);
-printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
-"IBSCTL = 0x%08x\n", value);
+pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
+value);
 return -EINVAL;
 }
 } while (1);
 if (!nodes) {
-printk(KERN_DEBUG "No CPU node configured for IBS\n");
+pr_debug("No CPU node configured for IBS\n");
 return -ENODEV;
 }
@@ -810,7 +810,7 @@ static void force_ibs_eilvt_setup(void)
 preempt_enable();
 if (offset == APIC_EILVT_NR_MAX) {
-printk(KERN_DEBUG "No EILVT entry available\n");
+pr_debug("No EILVT entry available\n");
 return;
 }
...
@@ -536,7 +536,7 @@ static int __init amd_uncore_init(void)
 if (ret)
 goto fail_nb;
-printk(KERN_INFO "perf: AMD NB counters detected\n");
+pr_info("perf: AMD NB counters detected\n");
 ret = 0;
 }
@@ -550,7 +550,7 @@ static int __init amd_uncore_init(void)
 if (ret)
 goto fail_l2;
-printk(KERN_INFO "perf: AMD L2I counters detected\n");
+pr_info("perf: AMD L2I counters detected\n");
 ret = 0;
 }
...
@@ -1325,13 +1325,13 @@ void __init intel_ds_init(void)
 switch (format) {
 case 0:
-printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+pr_cont("PEBS fmt0%c, ", pebs_type);
 x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
 x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
 break;
 case 1:
-printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+pr_cont("PEBS fmt1%c, ", pebs_type);
 x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
 break;
@@ -1351,7 +1351,7 @@ void __init intel_ds_init(void)
 break;
 default:
-printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
 x86_pmu.pebs = 0;
 }
 }
...
@@ -51,7 +51,7 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
 for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
 if (!rdrand_long(&tmp)) {
 clear_cpu_cap(c, X86_FEATURE_RDRAND);
-printk_once(KERN_WARNING "rdrand: disabled\n");
+pr_warn_once("rdrand: disabled\n");
 return;
 }
 }
...
@@ -87,10 +87,10 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
 c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 if (!printed) {
-printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+pr_info("CPU: Physical Processor ID: %d\n",
 c->phys_proc_id);
 if (c->x86_max_cores > 1)
-printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+pr_info("CPU: Processor Core ID: %d\n",
 c->cpu_core_id);
 printed = 1;
 }
...
@@ -33,7 +33,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
 if (max >= 0x80860001) {
 cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
 if (cpu_rev != 0x02000000) {
-printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
+pr_info("CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
 (cpu_rev >> 24) & 0xff,
 (cpu_rev >> 16) & 0xff,
 (cpu_rev >> 8) & 0xff,
@@ -44,10 +44,10 @@ static void init_transmeta(struct cpuinfo_x86 *c)
 if (max >= 0x80860002) {
 cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
 if (cpu_rev == 0x02000000) {
-printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
+pr_info("CPU: Processor revision %08X, %u MHz\n",
 new_cpu_rev, cpu_freq);
 }
-printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+pr_info("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
 (cms_rev1 >> 24) & 0xff,
 (cms_rev1 >> 16) & 0xff,
 (cms_rev1 >> 8) & 0xff,
@@ -76,7 +76,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
 (void *)&cpu_info[56],
 (void *)&cpu_info[60]);
 cpu_info[64] = '\0';
-printk(KERN_INFO "CPU: %s\n", cpu_info);
+pr_info("CPU: %s\n", cpu_info);
 }
 /* Unhide possibly hidden capability flags */
...
@@ -62,7 +62,7 @@ static unsigned long vmware_get_tsc_khz(void)
 tsc_hz = eax | (((uint64_t)ebx) << 32);
 do_div(tsc_hz, 1000);
 BUG_ON(tsc_hz >> 32);
-printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
 (unsigned long) tsc_hz / 1000,
 (unsigned long) tsc_hz % 1000);
@@ -84,8 +84,7 @@ static void __init vmware_platform_setup(void)
 if (ebx != UINT_MAX)
 x86_platform.calibrate_tsc = vmware_get_tsc_khz;
 else
-printk(KERN_WARNING
-"Failed to get TSC freq from the hypervisor\n");
+pr_warn("Failed to get TSC freq from the hypervisor\n");
 }
 /*
...
@@ -408,7 +408,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 processor.cpuflag = CPU_ENABLED;
 processor.cpufeature = (boot_cpu_data.x86 << 8) |
 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-processor.featureflag = boot_cpu_data.x86_capability[0];
+processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
 processor.reserved[0] = 0;
 processor.reserved[1] = 0;
 for (i = 0; i < 2; i++) {
...
@@ -1535,7 +1535,7 @@ __init void lguest_init(void)
 */
 cpu_detect(&new_cpu_data);
 /* head.S usually sets up the first capability word, so do it here. */
-new_cpu_data.x86_capability[0] = cpuid_edx(1);
+new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
 /* Math is always hard! */
 set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
...
@@ -1654,7 +1654,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 cpu_detect(&new_cpu_data);
 set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
 new_cpu_data.wp_works_ok = 1;
-new_cpu_data.x86_capability[0] = cpuid_edx(1);
+new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
 #endif
 if (xen_start_info->mod_start) {
...