Commit 3bc9b76b authored by Chuck Ebbert's avatar Chuck Ebbert Committed by Linus Torvalds

[PATCH] i386: __devinit should be __cpuinit

Several places in arch/i386/kernel/cpu and kernel/cpu were using __devinit
when they should have been __cpuinit.  Fixing that saves ~4K when
CONFIG_HOTPLUG && !CONFIG_HOTPLUG_CPU.

Noticed by Andrew Morton.
Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9a0b5817
...@@ -25,9 +25,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); ...@@ -25,9 +25,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
static int cachesize_override __devinitdata = -1; static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __devinitdata = 0; static int disable_x86_fxsr __cpuinitdata = 0;
static int disable_x86_serial_nr __devinitdata = 1; static int disable_x86_serial_nr __cpuinitdata = 1;
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
...@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str) ...@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str)
} }
__setup("cachesize=", cachesize_setup); __setup("cachesize=", cachesize_setup);
int __devinit get_model_name(struct cpuinfo_x86 *c) int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{ {
unsigned int *v; unsigned int *v;
char *p, *q; char *p, *q;
...@@ -89,7 +89,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c) ...@@ -89,7 +89,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c)
} }
void __devinit display_cacheinfo(struct cpuinfo_x86 *c) void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{ {
unsigned int n, dummy, ecx, edx, l2size; unsigned int n, dummy, ecx, edx, l2size;
...@@ -130,7 +130,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c) ...@@ -130,7 +130,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
/* Look up CPU names by table lookup. */ /* Look up CPU names by table lookup. */
static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{ {
struct cpu_model_info *info; struct cpu_model_info *info;
...@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) ...@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
} }
static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{ {
char *v = c->x86_vendor_id; char *v = c->x86_vendor_id;
int i; int i;
...@@ -210,7 +210,7 @@ static inline int flag_is_changeable_p(u32 flag) ...@@ -210,7 +210,7 @@ static inline int flag_is_changeable_p(u32 flag)
/* Probe for the CPUID instruction */ /* Probe for the CPUID instruction */
static int __devinit have_cpuid_p(void) static int __cpuinit have_cpuid_p(void)
{ {
return flag_is_changeable_p(X86_EFLAGS_ID); return flag_is_changeable_p(X86_EFLAGS_ID);
} }
...@@ -254,7 +254,7 @@ static void __init early_cpu_detect(void) ...@@ -254,7 +254,7 @@ static void __init early_cpu_detect(void)
} }
} }
void __devinit generic_identify(struct cpuinfo_x86 * c) void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{ {
u32 tfms, xlvl; u32 tfms, xlvl;
int junk; int junk;
...@@ -307,7 +307,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c) ...@@ -307,7 +307,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
#endif #endif
} }
static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{ {
if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
/* Disable processor serial number */ /* Disable processor serial number */
...@@ -335,7 +335,7 @@ __setup("serialnumber", x86_serial_nr_setup); ...@@ -335,7 +335,7 @@ __setup("serialnumber", x86_serial_nr_setup);
/* /*
* This does the hard work of actually picking apart the CPU stuff... * This does the hard work of actually picking apart the CPU stuff...
*/ */
void __devinit identify_cpu(struct cpuinfo_x86 *c) void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{ {
int i; int i;
...@@ -453,7 +453,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c) ...@@ -453,7 +453,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
} }
#ifdef CONFIG_X86_HT #ifdef CONFIG_X86_HT
void __devinit detect_ht(struct cpuinfo_x86 *c) void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{ {
u32 eax, ebx, ecx, edx; u32 eax, ebx, ecx, edx;
int index_msb, core_bits; int index_msb, core_bits;
...@@ -500,7 +500,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c) ...@@ -500,7 +500,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c)
} }
#endif #endif
void __devinit print_cpu_info(struct cpuinfo_x86 *c) void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{ {
char *vendor = NULL; char *vendor = NULL;
...@@ -523,7 +523,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c) ...@@ -523,7 +523,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c)
printk("\n"); printk("\n");
} }
cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
/* This is hacky. :) /* This is hacky. :)
* We're emulating future behavior. * We're emulating future behavior.
...@@ -570,7 +570,7 @@ void __init early_cpu_init(void) ...@@ -570,7 +570,7 @@ void __init early_cpu_init(void)
* and IDT. We reload them nevertheless, this function acts as a * and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across. * 'CPU state barrier', nothing should get across.
*/ */
void __devinit cpu_init(void) void __cpuinit cpu_init(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct tss_struct * t = &per_cpu(init_tss, cpu); struct tss_struct * t = &per_cpu(init_tss, cpu);
...@@ -670,7 +670,7 @@ void __devinit cpu_init(void) ...@@ -670,7 +670,7 @@ void __devinit cpu_init(void)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void __devinit cpu_uninit(void) void __cpuinit cpu_uninit(void)
{ {
int cpu = raw_smp_processor_id(); int cpu = raw_smp_processor_id();
cpu_clear(cpu, cpu_initialized); cpu_clear(cpu, cpu_initialized);
......
...@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void); ...@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void);
struct movsl_mask movsl_mask __read_mostly; struct movsl_mask movsl_mask __read_mostly;
#endif #endif
void __devinit early_intel_workaround(struct cpuinfo_x86 *c) void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
{ {
if (c->x86_vendor != X86_VENDOR_INTEL) if (c->x86_vendor != X86_VENDOR_INTEL)
return; return;
...@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c) ...@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
* This is called before we do cpu ident work * This is called before we do cpu ident work
*/ */
int __devinit ppro_with_ram_bug(void) int __cpuinit ppro_with_ram_bug(void)
{ {
/* Uses data from early_cpu_detect now */ /* Uses data from early_cpu_detect now */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
...@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void) ...@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void)
* P4 Xeon errata 037 workaround. * P4 Xeon errata 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache. * Hardware prefetcher may cause stale data to be loaded into the cache.
*/ */
static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{ {
unsigned long lo, hi; unsigned long lo, hi;
...@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) ...@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
/* /*
* find out the number of processor cores on the die * find out the number of processor cores on the die
*/ */
static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
{ {
unsigned int eax, ebx, ecx, edx; unsigned int eax, ebx, ecx, edx;
...@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) ...@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
return 1; return 1;
} }
static void __devinit init_intel(struct cpuinfo_x86 *c) static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{ {
unsigned int l2 = 0; unsigned int l2 = 0;
char *p = NULL; char *p = NULL;
...@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) ...@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
return size; return size;
} }
static struct cpu_dev intel_cpu_dev __devinitdata = { static struct cpu_dev intel_cpu_dev __cpuinitdata = {
.c_vendor = "Intel", .c_vendor = "Intel",
.c_ident = { "GenuineIntel" }, .c_ident = { "GenuineIntel" },
.c_models = { .c_models = {
......
...@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) ...@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
} }
} }
} }
static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{ {
struct _cpuid4_info *this_leaf, *sibling_leaf; struct _cpuid4_info *this_leaf, *sibling_leaf;
int sibling; int sibling;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment