Commit 6b5dd06d authored by Linus Torvalds

[PATCH] set-bit cleanup I: x86_capability.

Cosmetic change: x86_capability.  Makes it an array of unsigned long, and
removes the gratuitous & operators (it is already an array, so it decays
to a pointer on its own).  The old forms produce warnings once set_bit()
etc. take an unsigned long * instead of a void *.

Originally from Rusty Russell
parent 18f3e443
@@ -637,7 +637,7 @@ static int __init detect_init_APIC (void)
 		printk("Could not enable APIC!\n");
 		return -1;
 	}
-	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
+	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 	boot_cpu_physical_apicid = 0;
 	if (nmi_watchdog != NMI_NONE)
...
@@ -284,7 +284,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
 	 * Check for MCE support
 	 */
-	if( !test_bit(X86_FEATURE_MCE, &c->x86_capability) )
+	if( !test_bit(X86_FEATURE_MCE, c->x86_capability) )
 		return;
 	/*
@@ -314,7 +314,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
 	 * Check for PPro style MCA
 	 */
-	if( !test_bit(X86_FEATURE_MCA, &c->x86_capability) )
+	if( !test_bit(X86_FEATURE_MCA, c->x86_capability) )
 		return;
 	/* Ok machine check is available */
...
@@ -387,7 +387,7 @@ static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
 		return;
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) {
+	if ( test_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability) ) {
 		ctxt->cr4val = read_cr4();
 		write_cr4(ctxt->cr4val & (unsigned char) ~(1<<7));
 	}
@@ -448,7 +448,7 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt)
 	write_cr0( read_cr0() & 0xbfffffff );
 	/* Restore value of CR4 */
-	if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
+	if ( test_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability) )
 		write_cr4(ctxt->cr4val);
 	/* Re-enable interrupts locally (if enabled previously) */
@@ -2122,7 +2122,7 @@ static void __init centaur_mcr_init(void)
 static int __init mtrr_setup(void)
 {
-	if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
+	if ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ) {
 		/* Intel (P6) standard MTRRs */
 		mtrr_if = MTRR_IF_INTEL;
 		get_mtrr = intel_get_mtrr;
@@ -2166,14 +2166,14 @@ static int __init mtrr_setup(void)
 			break;
 		}
-	} else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
+	} else if ( test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ) {
 		/* Pre-Athlon (K6) AMD CPU MTRRs */
 		mtrr_if = MTRR_IF_AMD_K6;
 		get_mtrr = amd_get_mtrr;
 		set_mtrr_up = amd_set_mtrr_up;
 		size_or_mask = 0xfff00000;	/* 32 bits */
 		size_and_mask = 0;
-	} else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
+	} else if ( test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ) {
 		/* Cyrix ARRs */
 		mtrr_if = MTRR_IF_CYRIX_ARR;
 		get_mtrr = cyrix_get_arr;
@@ -2182,7 +2182,7 @@ static int __init mtrr_setup(void)
 		cyrix_arr_init();
 		size_or_mask = 0xfff00000;	/* 32 bits */
 		size_and_mask = 0;
-	} else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
+	} else if ( test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) ) {
		/* Centaur MCRs */
 		mtrr_if = MTRR_IF_CENTAUR_MCR;
 		get_mtrr = centaur_get_mcr;
...
@@ -173,10 +173,10 @@ static inline int noncached_address(unsigned long addr)
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
-	return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
+	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
 		&& addr >= __pa(high_memory);
 #else
 	return addr >= __pa(high_memory);
...
@@ -736,7 +736,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	int entropy = 0;
 #if defined (__i386__)
-	if ( test_bit(X86_FEATURE_TSC, &boot_cpu_data.x86_capability) ) {
+	if ( test_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability) ) {
 		__u32 high;
 		rdtsc(time, high);
 		num ^= high;
...
@@ -194,7 +194,7 @@ static void __init check_config(void)
 	 */
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
-	    && test_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability)
+	    && test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)
 	    && boot_cpu_data.x86 == 5
 	    && boot_cpu_data.x86_model == 2
 	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
...
@@ -40,7 +40,7 @@ struct cpuinfo_x86 {
 	char	hard_math;
 	char	rfu;
 	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-	__u32	x86_capability[NCAPINTS];
+	unsigned long x86_capability[NCAPINTS];
 	char	x86_vendor_id[16];
 	char	x86_model_id[64];
 	int	x86_cache_size;  /* in KB - valid for CPUS which support this
...