Commit 6b5dd06d authored by Linus Torvalds

[PATCH] set-bit cleanup I: x86_capability.

Cosmetic change: x86_capability.  Makes it an unsigned long, and
removes the gratuitous & operators (it is already an array).  These
produce warnings when set_bit() etc. takes an unsigned long * instead
of a void *.

Originally from Rusty Russell
parent 18f3e443
...@@ -637,7 +637,7 @@ static int __init detect_init_APIC (void) ...@@ -637,7 +637,7 @@ static int __init detect_init_APIC (void)
printk("Could not enable APIC!\n"); printk("Could not enable APIC!\n");
return -1; return -1;
} }
set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability); set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
boot_cpu_physical_apicid = 0; boot_cpu_physical_apicid = 0;
if (nmi_watchdog != NMI_NONE) if (nmi_watchdog != NMI_NONE)
......
...@@ -284,7 +284,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c) ...@@ -284,7 +284,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
* Check for MCE support * Check for MCE support
*/ */
if( !test_bit(X86_FEATURE_MCE, &c->x86_capability) ) if( !test_bit(X86_FEATURE_MCE, c->x86_capability) )
return; return;
/* /*
...@@ -314,7 +314,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c) ...@@ -314,7 +314,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
* Check for PPro style MCA * Check for PPro style MCA
*/ */
if( !test_bit(X86_FEATURE_MCA, &c->x86_capability) ) if( !test_bit(X86_FEATURE_MCA, c->x86_capability) )
return; return;
/* Ok machine check is available */ /* Ok machine check is available */
......
...@@ -387,7 +387,7 @@ static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt) ...@@ -387,7 +387,7 @@ static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
return; return;
/* Save value of CR4 and clear Page Global Enable (bit 7) */ /* Save value of CR4 and clear Page Global Enable (bit 7) */
if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) { if ( test_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability) ) {
ctxt->cr4val = read_cr4(); ctxt->cr4val = read_cr4();
write_cr4(ctxt->cr4val & (unsigned char) ~(1<<7)); write_cr4(ctxt->cr4val & (unsigned char) ~(1<<7));
} }
...@@ -448,7 +448,7 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt) ...@@ -448,7 +448,7 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt)
write_cr0( read_cr0() & 0xbfffffff ); write_cr0( read_cr0() & 0xbfffffff );
/* Restore value of CR4 */ /* Restore value of CR4 */
if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) if ( test_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability) )
write_cr4(ctxt->cr4val); write_cr4(ctxt->cr4val);
/* Re-enable interrupts locally (if enabled previously) */ /* Re-enable interrupts locally (if enabled previously) */
...@@ -2122,7 +2122,7 @@ static void __init centaur_mcr_init(void) ...@@ -2122,7 +2122,7 @@ static void __init centaur_mcr_init(void)
static int __init mtrr_setup(void) static int __init mtrr_setup(void)
{ {
if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) { if ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ) {
/* Intel (P6) standard MTRRs */ /* Intel (P6) standard MTRRs */
mtrr_if = MTRR_IF_INTEL; mtrr_if = MTRR_IF_INTEL;
get_mtrr = intel_get_mtrr; get_mtrr = intel_get_mtrr;
...@@ -2166,14 +2166,14 @@ static int __init mtrr_setup(void) ...@@ -2166,14 +2166,14 @@ static int __init mtrr_setup(void)
break; break;
} }
} else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) { } else if ( test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ) {
/* Pre-Athlon (K6) AMD CPU MTRRs */ /* Pre-Athlon (K6) AMD CPU MTRRs */
mtrr_if = MTRR_IF_AMD_K6; mtrr_if = MTRR_IF_AMD_K6;
get_mtrr = amd_get_mtrr; get_mtrr = amd_get_mtrr;
set_mtrr_up = amd_set_mtrr_up; set_mtrr_up = amd_set_mtrr_up;
size_or_mask = 0xfff00000; /* 32 bits */ size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0; size_and_mask = 0;
} else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) { } else if ( test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ) {
/* Cyrix ARRs */ /* Cyrix ARRs */
mtrr_if = MTRR_IF_CYRIX_ARR; mtrr_if = MTRR_IF_CYRIX_ARR;
get_mtrr = cyrix_get_arr; get_mtrr = cyrix_get_arr;
...@@ -2182,7 +2182,7 @@ static int __init mtrr_setup(void) ...@@ -2182,7 +2182,7 @@ static int __init mtrr_setup(void)
cyrix_arr_init(); cyrix_arr_init();
size_or_mask = 0xfff00000; /* 32 bits */ size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0; size_and_mask = 0;
} else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) { } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) ) {
/* Centaur MCRs */ /* Centaur MCRs */
mtrr_if = MTRR_IF_CENTAUR_MCR; mtrr_if = MTRR_IF_CENTAUR_MCR;
get_mtrr = centaur_get_mcr; get_mtrr = centaur_get_mcr;
......
...@@ -612,7 +612,7 @@ static void __init parse_mem_cmdline (char ** cmdline_p) ...@@ -612,7 +612,7 @@ static void __init parse_mem_cmdline (char ** cmdline_p)
to--; to--;
if (!memcmp(from+4, "nopentium", 9)) { if (!memcmp(from+4, "nopentium", 9)) {
from += 9+4; from += 9+4;
clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability); clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
} else if (!memcmp(from+4, "exactmap", 8)) { } else if (!memcmp(from+4, "exactmap", 8)) {
from += 8+4; from += 8+4;
e820.nr_map = 0; e820.nr_map = 0;
...@@ -1121,7 +1121,7 @@ static int __init init_amd(struct cpuinfo_x86 *c) ...@@ -1121,7 +1121,7 @@ static int __init init_amd(struct cpuinfo_x86 *c)
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID; /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability); clear_bit(0*32+31, c->x86_capability);
r = get_model_name(c); r = get_model_name(c);
...@@ -1132,8 +1132,8 @@ static int __init init_amd(struct cpuinfo_x86 *c) ...@@ -1132,8 +1132,8 @@ static int __init init_amd(struct cpuinfo_x86 *c)
{ {
/* Based on AMD doc 20734R - June 2000 */ /* Based on AMD doc 20734R - June 2000 */
if ( c->x86_model == 0 ) { if ( c->x86_model == 0 ) {
clear_bit(X86_FEATURE_APIC, &c->x86_capability); clear_bit(X86_FEATURE_APIC, c->x86_capability);
set_bit(X86_FEATURE_PGE, &c->x86_capability); set_bit(X86_FEATURE_PGE, c->x86_capability);
} }
break; break;
} }
...@@ -1213,7 +1213,7 @@ static int __init init_amd(struct cpuinfo_x86 *c) ...@@ -1213,7 +1213,7 @@ static int __init init_amd(struct cpuinfo_x86 *c)
/* Set MTRR capability flag if appropriate */ /* Set MTRR capability flag if appropriate */
if (c->x86_model == 13 || c->x86_model == 9 || if (c->x86_model == 13 || c->x86_model == 9 ||
(c->x86_model == 8 && c->x86_mask >= 8)) (c->x86_model == 8 && c->x86_mask >= 8))
set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability); set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
break; break;
} }
break; break;
...@@ -1226,12 +1226,12 @@ static int __init init_amd(struct cpuinfo_x86 *c) ...@@ -1226,12 +1226,12 @@ static int __init init_amd(struct cpuinfo_x86 *c)
* here. * here.
*/ */
if (c->x86_model == 6 || c->x86_model == 7) { if (c->x86_model == 6 || c->x86_model == 7) {
if (!test_bit(X86_FEATURE_XMM, &c->x86_capability)) { if (!test_bit(X86_FEATURE_XMM, c->x86_capability)) {
printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
rdmsr(MSR_K7_HWCR, l, h); rdmsr(MSR_K7_HWCR, l, h);
l &= ~0x00008000; l &= ~0x00008000;
wrmsr(MSR_K7_HWCR, l, h); wrmsr(MSR_K7_HWCR, l, h);
set_bit(X86_FEATURE_XMM, &c->x86_capability); set_bit(X86_FEATURE_XMM, c->x86_capability);
} }
} }
break; break;
...@@ -1348,12 +1348,12 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) ...@@ -1348,12 +1348,12 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID; /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability); clear_bit(0*32+31, c->x86_capability);
/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */ /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
if ( test_bit(1*32+24, &c->x86_capability) ) { if ( test_bit(1*32+24, c->x86_capability) ) {
clear_bit(1*32+24, &c->x86_capability); clear_bit(1*32+24, c->x86_capability);
set_bit(X86_FEATURE_CXMMX, &c->x86_capability); set_bit(X86_FEATURE_CXMMX, c->x86_capability);
} }
do_cyrix_devid(&dir0, &dir1); do_cyrix_devid(&dir0, &dir1);
...@@ -1400,7 +1400,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) ...@@ -1400,7 +1400,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
} else /* 686 */ } else /* 686 */
p = Cx86_cb+1; p = Cx86_cb+1;
/* Emulate MTRRs using Cyrix's ARRs. */ /* Emulate MTRRs using Cyrix's ARRs. */
set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability); set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
/* 6x86's contain this bug */ /* 6x86's contain this bug */
c->coma_bug = 1; c->coma_bug = 1;
break; break;
...@@ -1444,7 +1444,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) ...@@ -1444,7 +1444,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
p = Cx86_cb+2; p = Cx86_cb+2;
c->x86_model = (dir1 & 0x20) ? 1 : 2; c->x86_model = (dir1 & 0x20) ? 1 : 2;
#ifndef CONFIG_CS5520 #ifndef CONFIG_CS5520
clear_bit(X86_FEATURE_TSC, &c->x86_capability); clear_bit(X86_FEATURE_TSC, c->x86_capability);
#endif #endif
} }
break; break;
...@@ -1466,7 +1466,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c) ...@@ -1466,7 +1466,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20)) if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
(c->x86_model)++; (c->x86_model)++;
/* Emulate MTRRs using Cyrix's ARRs. */ /* Emulate MTRRs using Cyrix's ARRs. */
set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability); set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
break; break;
case 0xf: /* Cyrix 486 without DEVID registers */ case 0xf: /* Cyrix 486 without DEVID registers */
...@@ -1765,7 +1765,7 @@ static void __init init_centaur(struct cpuinfo_x86 *c) ...@@ -1765,7 +1765,7 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID; /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability); clear_bit(0*32+31, c->x86_capability);
switch (c->x86) { switch (c->x86) {
...@@ -1776,7 +1776,7 @@ static void __init init_centaur(struct cpuinfo_x86 *c) ...@@ -1776,7 +1776,7 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK; fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
fcr_clr=DPDC; fcr_clr=DPDC;
printk(KERN_NOTICE "Disabling bugged TSC.\n"); printk(KERN_NOTICE "Disabling bugged TSC.\n");
clear_bit(X86_FEATURE_TSC, &c->x86_capability); clear_bit(X86_FEATURE_TSC, c->x86_capability);
#ifdef CONFIG_X86_OOSTORE #ifdef CONFIG_X86_OOSTORE
winchip_create_optimal_mcr(); winchip_create_optimal_mcr();
/* Enable /* Enable
...@@ -1855,12 +1855,12 @@ static void __init init_centaur(struct cpuinfo_x86 *c) ...@@ -1855,12 +1855,12 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
printk(KERN_INFO "Centaur FCR is 0x%X\n",lo); printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
} }
/* Emulate MTRRs using Centaur's MCR. */ /* Emulate MTRRs using Centaur's MCR. */
set_bit(X86_FEATURE_CENTAUR_MCR, &c->x86_capability); set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
/* Report CX8 */ /* Report CX8 */
set_bit(X86_FEATURE_CX8, &c->x86_capability); set_bit(X86_FEATURE_CX8, c->x86_capability);
/* Set 3DNow! on Winchip 2 and above. */ /* Set 3DNow! on Winchip 2 and above. */
if (c->x86_model >=8) if (c->x86_model >=8)
set_bit(X86_FEATURE_3DNOW, &c->x86_capability); set_bit(X86_FEATURE_3DNOW, c->x86_capability);
/* See if we can find out some more. */ /* See if we can find out some more. */
if ( cpuid_eax(0x80000000) >= 0x80000005 ) { if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
/* Yes, we can. */ /* Yes, we can. */
...@@ -1878,8 +1878,8 @@ static void __init init_centaur(struct cpuinfo_x86 *c) ...@@ -1878,8 +1878,8 @@ static void __init init_centaur(struct cpuinfo_x86 *c)
lo |= (1<<1 | 1<<7); /* Report CX8 & enable PGE */ lo |= (1<<1 | 1<<7); /* Report CX8 & enable PGE */
wrmsr (MSR_VIA_FCR, lo, hi); wrmsr (MSR_VIA_FCR, lo, hi);
set_bit(X86_FEATURE_CX8, &c->x86_capability); set_bit(X86_FEATURE_CX8, c->x86_capability);
set_bit(X86_FEATURE_3DNOW, &c->x86_capability); set_bit(X86_FEATURE_3DNOW, c->x86_capability);
get_model_name(c); get_model_name(c);
display_cacheinfo(c); display_cacheinfo(c);
...@@ -1973,7 +1973,7 @@ static void __init init_rise(struct cpuinfo_x86 *c) ...@@ -1973,7 +1973,7 @@ static void __init init_rise(struct cpuinfo_x86 *c)
"movl $0x2333313a, %%edx\n\t" "movl $0x2333313a, %%edx\n\t"
"cpuid\n\t" : : : "eax", "ebx", "ecx", "edx" "cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
); );
set_bit(X86_FEATURE_CX8, &c->x86_capability); set_bit(X86_FEATURE_CX8, c->x86_capability);
} }
...@@ -2123,7 +2123,7 @@ static void __init init_intel(struct cpuinfo_x86 *c) ...@@ -2123,7 +2123,7 @@ static void __init init_intel(struct cpuinfo_x86 *c)
/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */ /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 ) if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
clear_bit(X86_FEATURE_SEP, &c->x86_capability); clear_bit(X86_FEATURE_SEP, c->x86_capability);
/* Names for the Pentium II/Celeron processors /* Names for the Pentium II/Celeron processors
detectable only by also checking the cache size. detectable only by also checking the cache size.
...@@ -2153,7 +2153,7 @@ static void __init init_intel(struct cpuinfo_x86 *c) ...@@ -2153,7 +2153,7 @@ static void __init init_intel(struct cpuinfo_x86 *c)
strcpy(c->x86_model_id, p); strcpy(c->x86_model_id, p);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (test_bit(X86_FEATURE_HT, &c->x86_capability)) { if (test_bit(X86_FEATURE_HT, c->x86_capability)) {
extern int phys_proc_id[NR_CPUS]; extern int phys_proc_id[NR_CPUS];
u32 eax, ebx, ecx, edx; u32 eax, ebx, ecx, edx;
...@@ -2322,7 +2322,7 @@ static int __init deep_magic_nexgen_probe(void) ...@@ -2322,7 +2322,7 @@ static int __init deep_magic_nexgen_probe(void)
static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c) static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{ {
if( test_bit(X86_FEATURE_PN, &c->x86_capability) && if( test_bit(X86_FEATURE_PN, c->x86_capability) &&
disable_x86_serial_nr ) { disable_x86_serial_nr ) {
/* Disable processor serial number */ /* Disable processor serial number */
unsigned long lo,hi; unsigned long lo,hi;
...@@ -2330,7 +2330,7 @@ static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c) ...@@ -2330,7 +2330,7 @@ static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
lo |= 0x200000; lo |= 0x200000;
wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
printk(KERN_NOTICE "CPU serial number disabled.\n"); printk(KERN_NOTICE "CPU serial number disabled.\n");
clear_bit(X86_FEATURE_PN, &c->x86_capability); clear_bit(X86_FEATURE_PN, c->x86_capability);
/* Disabling the serial number may affect the cpuid level */ /* Disabling the serial number may affect the cpuid level */
c->cpuid_level = cpuid_eax(0); c->cpuid_level = cpuid_eax(0);
...@@ -2496,8 +2496,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -2496,8 +2496,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
/* Intel-defined flags: level 0x00000001 */ /* Intel-defined flags: level 0x00000001 */
if ( c->cpuid_level >= 0x00000001 ) { if ( c->cpuid_level >= 0x00000001 ) {
cpuid(0x00000001, &tfms, &junk, &junk, u32 capability;
&c->x86_capability[0]); cpuid(0x00000001, &tfms, &junk, &junk, &capability);
c->x86_capability[0] = capability;
c->x86 = (tfms >> 8) & 15; c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15; c->x86_model = (tfms >> 4) & 15;
c->x86_mask = tfms & 15; c->x86_mask = tfms & 15;
...@@ -2523,7 +2524,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -2523,7 +2524,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
} }
} }
printk(KERN_DEBUG "CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n", printk(KERN_DEBUG "CPU: Before vendor init, caps: %08lx %08lx %08lx, vendor = %d\n",
c->x86_capability[0], c->x86_capability[0],
c->x86_capability[1], c->x86_capability[1],
c->x86_capability[2], c->x86_capability[2],
...@@ -2588,7 +2589,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -2588,7 +2589,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
} }
printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n", printk(KERN_DEBUG "CPU: After vendor init, caps: %08lx %08lx %08lx %08lx\n",
c->x86_capability[0], c->x86_capability[0],
c->x86_capability[1], c->x86_capability[1],
c->x86_capability[2], c->x86_capability[2],
...@@ -2602,13 +2603,13 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -2602,13 +2603,13 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
/* TSC disabled? */ /* TSC disabled? */
#ifndef CONFIG_X86_TSC #ifndef CONFIG_X86_TSC
if ( tsc_disable ) if ( tsc_disable )
clear_bit(X86_FEATURE_TSC, &c->x86_capability); clear_bit(X86_FEATURE_TSC, c->x86_capability);
#endif #endif
/* FXSR disabled? */ /* FXSR disabled? */
if (disable_x86_fxsr) { if (disable_x86_fxsr) {
clear_bit(X86_FEATURE_FXSR, &c->x86_capability); clear_bit(X86_FEATURE_FXSR, c->x86_capability);
clear_bit(X86_FEATURE_XMM, &c->x86_capability); clear_bit(X86_FEATURE_XMM, c->x86_capability);
} }
/* Disable the PN if appropriate */ /* Disable the PN if appropriate */
...@@ -2631,7 +2632,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -2631,7 +2632,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
/* Now the feature flags better reflect actual CPU features! */ /* Now the feature flags better reflect actual CPU features! */
printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n", printk(KERN_DEBUG "CPU: After generic, caps: %08lx %08lx %08lx %08lx\n",
c->x86_capability[0], c->x86_capability[0],
c->x86_capability[1], c->x86_capability[1],
c->x86_capability[2], c->x86_capability[2],
...@@ -2649,7 +2650,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -2649,7 +2650,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
} }
printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n", printk(KERN_DEBUG "CPU: Common caps: %08lx %08lx %08lx %08lx\n",
boot_cpu_data.x86_capability[0], boot_cpu_data.x86_capability[0],
boot_cpu_data.x86_capability[1], boot_cpu_data.x86_capability[1],
boot_cpu_data.x86_capability[2], boot_cpu_data.x86_capability[2],
...@@ -2759,7 +2760,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -2759,7 +2760,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else else
seq_printf(m, "stepping\t: unknown\n"); seq_printf(m, "stepping\t: unknown\n");
if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) { if ( test_bit(X86_FEATURE_TSC, c->x86_capability) ) {
seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
cpu_khz / 1000, (cpu_khz % 1000)); cpu_khz / 1000, (cpu_khz % 1000));
} }
...@@ -2789,7 +2790,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -2789,7 +2790,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
c->wp_works_ok ? "yes" : "no"); c->wp_works_ok ? "yes" : "no");
for ( i = 0 ; i < 32*NCAPINTS ; i++ ) for ( i = 0 ; i < 32*NCAPINTS ; i++ )
if ( test_bit(i, &c->x86_capability) && if ( test_bit(i, c->x86_capability) &&
x86_cap_flags[i] != NULL ) x86_cap_flags[i] != NULL )
seq_printf(m, " %s", x86_cap_flags[i]); seq_printf(m, " %s", x86_cap_flags[i]);
......
...@@ -173,10 +173,10 @@ static inline int noncached_address(unsigned long addr) ...@@ -173,10 +173,10 @@ static inline int noncached_address(unsigned long addr)
* caching for the high addresses through the KEN pin, but * caching for the high addresses through the KEN pin, but
* we maintain the tradition of paranoia in this code. * we maintain the tradition of paranoia in this code.
*/ */
return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) || return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) || test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) || test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
&& addr >= __pa(high_memory); && addr >= __pa(high_memory);
#else #else
return addr >= __pa(high_memory); return addr >= __pa(high_memory);
......
...@@ -736,7 +736,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) ...@@ -736,7 +736,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
int entropy = 0; int entropy = 0;
#if defined (__i386__) #if defined (__i386__)
if ( test_bit(X86_FEATURE_TSC, &boot_cpu_data.x86_capability) ) { if ( test_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability) ) {
__u32 high; __u32 high;
rdtsc(time, high); rdtsc(time, high);
num ^= high; num ^= high;
......
...@@ -194,7 +194,7 @@ static void __init check_config(void) ...@@ -194,7 +194,7 @@ static void __init check_config(void)
*/ */
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC) #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
&& test_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability) && test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)
&& boot_cpu_data.x86 == 5 && boot_cpu_data.x86 == 5
&& boot_cpu_data.x86_model == 2 && boot_cpu_data.x86_model == 2
&& (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11)) && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
......
...@@ -40,7 +40,7 @@ struct cpuinfo_x86 { ...@@ -40,7 +40,7 @@ struct cpuinfo_x86 {
char hard_math; char hard_math;
char rfu; char rfu;
int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
__u32 x86_capability[NCAPINTS]; unsigned long x86_capability[NCAPINTS];
char x86_vendor_id[16]; char x86_vendor_id[16];
char x86_model_id[64]; char x86_model_id[64];
int x86_cache_size; /* in KB - valid for CPUS which support this int x86_cache_size; /* in KB - valid for CPUS which support this
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment