Commit 6b5dd06d authored by Linus Torvalds

[PATCH] set-bit cleanup I: x86_capability.

Cosmetic change: x86_capability.  Makes it an array of unsigned long,
and removes the gratuitous & operators (it is already an array).  The
& operators produce warnings once set_bit() etc. take an unsigned
long * instead of a void *.

Originally from Rusty Russell
parent 18f3e443
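A minimal, self-contained sketch of the warning this patch fixes (not from the patch itself; the simplified set_bit() and the NCAPINTS value below are illustrative assumptions). With a void * parameter both call styles compile silently; once the parameter is unsigned long *, passing &array hands the compiler an unsigned long (*)[NCAPINTS], while the bare array name decays to the expected unsigned long *:

#include <stdio.h>

#define NCAPINTS 4	/* illustrative; the kernel header defines its own value */

/* Simplified stand-in for the kernel's set_bit(); the unsigned long *
 * parameter is what makes the old &array call style warn. */
static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / (8 * sizeof(unsigned long))] |=
		1UL << (nr % (8 * sizeof(unsigned long)));
}

int main(void)
{
	unsigned long x86_capability[NCAPINTS] = { 0 };

	set_bit(4, x86_capability);	/* OK: array name decays to unsigned long * */
	/*
	 * set_bit(4, &x86_capability); -- would warn: the argument has type
	 * unsigned long (*)[NCAPINTS], not unsigned long *.
	 */
	printf("word 0 = %#lx\n", x86_capability[0]);
	return 0;
}

Note that the old form worked only because void * accepts any object pointer; the pointer value passed is identical either way, so the change is purely about type checking, not behavior.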
@@ -637,7 +637,7 @@ static int __init detect_init_APIC (void)
 		printk("Could not enable APIC!\n");
 		return -1;
 	}
-	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
+	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 	boot_cpu_physical_apicid = 0;
 	if (nmi_watchdog != NMI_NONE)
@@ -284,7 +284,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
 	 * Check for MCE support
 	 */
-	if( !test_bit(X86_FEATURE_MCE, &c->x86_capability) )
+	if( !test_bit(X86_FEATURE_MCE, c->x86_capability) )
 		return;
 	/*
@@ -314,7 +314,7 @@ static void __init intel_mcheck_init(struct cpuinfo_x86 *c)
 	 * Check for PPro style MCA
 	 */
-	if( !test_bit(X86_FEATURE_MCA, &c->x86_capability) )
+	if( !test_bit(X86_FEATURE_MCA, c->x86_capability) )
 		return;
 	/* Ok machine check is available */
@@ -387,7 +387,7 @@ static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
 		return;
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) ) {
+	if ( test_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability) ) {
 		ctxt->cr4val = read_cr4();
 		write_cr4(ctxt->cr4val & (unsigned char) ~(1<<7));
 	}
@@ -448,7 +448,7 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt)
 	write_cr0( read_cr0() & 0xbfffffff );
 	/* Restore value of CR4 */
-	if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
+	if ( test_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability) )
 		write_cr4(ctxt->cr4val);
 	/* Re-enable interrupts locally (if enabled previously) */
@@ -2122,7 +2122,7 @@ static void __init centaur_mcr_init(void)
 static int __init mtrr_setup(void)
 {
-	if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
+	if ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ) {
 		/* Intel (P6) standard MTRRs */
 		mtrr_if = MTRR_IF_INTEL;
 		get_mtrr = intel_get_mtrr;
@@ -2166,14 +2166,14 @@ static int __init mtrr_setup(void)
 			break;
 		}
-	} else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
+	} else if ( test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ) {
 		/* Pre-Athlon (K6) AMD CPU MTRRs */
 		mtrr_if = MTRR_IF_AMD_K6;
 		get_mtrr = amd_get_mtrr;
 		set_mtrr_up = amd_set_mtrr_up;
 		size_or_mask = 0xfff00000;	/* 32 bits */
 		size_and_mask = 0;
-	} else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
+	} else if ( test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ) {
 		/* Cyrix ARRs */
 		mtrr_if = MTRR_IF_CYRIX_ARR;
 		get_mtrr = cyrix_get_arr;
@@ -2182,7 +2182,7 @@ static int __init mtrr_setup(void)
 		cyrix_arr_init();
 		size_or_mask = 0xfff00000;	/* 32 bits */
 		size_and_mask = 0;
-	} else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
+	} else if ( test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) ) {
 		/* Centaur MCRs */
 		mtrr_if = MTRR_IF_CENTAUR_MCR;
 		get_mtrr = centaur_get_mcr;
[diff for one file collapsed in the original view, not shown]
@@ -173,10 +173,10 @@ static inline int noncached_address(unsigned long addr)
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
-	return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
+	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
 	  && addr >= __pa(high_memory);
 #else
 	return addr >= __pa(high_memory);
@@ -736,7 +736,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	int entropy = 0;
 #if defined (__i386__)
-	if ( test_bit(X86_FEATURE_TSC, &boot_cpu_data.x86_capability) ) {
+	if ( test_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability) ) {
 		__u32 high;
 		rdtsc(time, high);
 		num ^= high;
@@ -194,7 +194,7 @@ static void __init check_config(void)
 	 */
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
-	    && test_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability)
+	    && test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)
 	    && boot_cpu_data.x86 == 5
 	    && boot_cpu_data.x86_model == 2
 	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
@@ -40,7 +40,7 @@ struct cpuinfo_x86 {
 	char	hard_math;
 	char	rfu;
 	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-	__u32	x86_capability[NCAPINTS];
+	unsigned long	x86_capability[NCAPINTS];
 	char	x86_vendor_id[16];
 	char	x86_model_id[64];
 	int	x86_cache_size;	/* in KB - valid for CPUS which support this