Commit 078a55fc authored by Paul Gortmaker

MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code

commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  For example, the fix in
commit 5e427ec2 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings.  In any case, they are temporary and harmless.

Here, we remove all the MIPS __cpuinit from C code and __CPUINIT
from asm files.  MIPS is interesting in this respect, because there
are also uasm users hiding behind their own renamed versions of the
__cpuinit macros.

[1] https://lkml.org/lkml/2013/5/20/589

[ralf@linux-mips.org: Folded in Paul's followup fix.]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 60ffef06
...@@ -182,7 +182,7 @@ const char *get_system_type(void) ...@@ -182,7 +182,7 @@ const char *get_system_type(void)
return ath79_sys_type; return ath79_sys_type;
} }
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
return CP0_LEGACY_COMPARE_IRQ; return CP0_LEGACY_COMPARE_IRQ;
} }
......
...@@ -1095,7 +1095,7 @@ static void octeon_irq_ip3_ciu(void) ...@@ -1095,7 +1095,7 @@ static void octeon_irq_ip3_ciu(void)
static bool octeon_irq_use_ip4; static bool octeon_irq_use_ip4;
static void __cpuinit octeon_irq_local_enable_ip4(void *arg) static void octeon_irq_local_enable_ip4(void *arg)
{ {
set_c0_status(STATUSF_IP4); set_c0_status(STATUSF_IP4);
} }
...@@ -1110,21 +1110,21 @@ static void (*octeon_irq_ip2)(void); ...@@ -1110,21 +1110,21 @@ static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void); static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void); static void (*octeon_irq_ip4)(void);
void __cpuinitdata (*octeon_irq_setup_secondary)(void); void (*octeon_irq_setup_secondary)(void);
void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{ {
octeon_irq_ip4 = h; octeon_irq_ip4 = h;
octeon_irq_use_ip4 = true; octeon_irq_use_ip4 = true;
on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
} }
static void __cpuinit octeon_irq_percpu_enable(void) static void octeon_irq_percpu_enable(void)
{ {
irq_cpu_online(); irq_cpu_online();
} }
static void __cpuinit octeon_irq_init_ciu_percpu(void) static void octeon_irq_init_ciu_percpu(void)
{ {
int coreid = cvmx_get_core_num(); int coreid = cvmx_get_core_num();
...@@ -1167,7 +1167,7 @@ static void octeon_irq_init_ciu2_percpu(void) ...@@ -1167,7 +1167,7 @@ static void octeon_irq_init_ciu2_percpu(void)
cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
} }
static void __cpuinit octeon_irq_setup_secondary_ciu(void) static void octeon_irq_setup_secondary_ciu(void)
{ {
octeon_irq_init_ciu_percpu(); octeon_irq_init_ciu_percpu();
octeon_irq_percpu_enable(); octeon_irq_percpu_enable();
......
...@@ -173,7 +173,7 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle) ...@@ -173,7 +173,7 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle)
* After we've done initial boot, this function is called to allow the * After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed * board code to clean up state, if needed
*/ */
static void __cpuinit octeon_init_secondary(void) static void octeon_init_secondary(void)
{ {
unsigned int sr; unsigned int sr;
...@@ -375,7 +375,7 @@ static int octeon_update_boot_vector(unsigned int cpu) ...@@ -375,7 +375,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
return 0; return 0;
} }
static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb, static int octeon_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu) unsigned long action, void *hcpu)
{ {
unsigned int cpu = (unsigned long)hcpu; unsigned int cpu = (unsigned long)hcpu;
...@@ -394,7 +394,7 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb, ...@@ -394,7 +394,7 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK; return NOTIFY_OK;
} }
static int __cpuinit register_cavium_notifier(void) static int register_cavium_notifier(void)
{ {
hotcpu_notifier(octeon_cpu_callback, 0); hotcpu_notifier(octeon_cpu_callback, 0);
return 0; return 0;
......
...@@ -13,12 +13,8 @@ ...@@ -13,12 +13,8 @@
#ifdef CONFIG_EXPORT_UASM #ifdef CONFIG_EXPORT_UASM
#include <linux/export.h> #include <linux/export.h>
#define __uasminit
#define __uasminitdata
#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym) #define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
#else #else
#define __uasminit __cpuinit
#define __uasminitdata __cpuinitdata
#define UASM_EXPORT_SYMBOL(sym) #define UASM_EXPORT_SYMBOL(sym)
#endif #endif
...@@ -54,43 +50,36 @@ ...@@ -54,43 +50,36 @@
#endif #endif
#define Ip_u1u2u3(op) \ #define Ip_u1u2u3(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \ #define Ip_u2u1u3(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \ #define Ip_u3u1u2(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \ #define Ip_u1u2s3(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \ #define Ip_u2s3u1(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_u2u1s3(op) \ #define Ip_u2u1s3(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2u1msbu3(op) \ #define Ip_u2u1msbu3(op) \
void __uasminit \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
unsigned int d) unsigned int d)
#define Ip_u1u2(op) \ #define Ip_u1u2(op) \
void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \ #define Ip_u1s2(op) \
void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b) void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a) #define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf) #define Ip_0(op) void ISAOPC(op)(u32 **buf)
Ip_u2u1s3(_addiu); Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu); Ip_u3u1u2(_addu);
...@@ -163,7 +152,7 @@ struct uasm_label { ...@@ -163,7 +152,7 @@ struct uasm_label {
int lab; int lab;
}; };
void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
int lid); int lid);
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
int ISAFUNC(uasm_in_compat_space_p)(long addr); int ISAFUNC(uasm_in_compat_space_p)(long addr);
...@@ -174,7 +163,7 @@ void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr); ...@@ -174,7 +163,7 @@ void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr); void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \ #define UASM_L_LA(lb) \
static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \ static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
{ \ { \
ISAFUNC(uasm_build_label)(lab, addr, label##lb); \ ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
} }
......
...@@ -28,8 +28,6 @@ ...@@ -28,8 +28,6 @@
.set mips0 .set mips0
.endm .endm
__CPUINIT
/*********************************************************************** /***********************************************************************
* Alternate CPU1 startup vector for BMIPS4350 * Alternate CPU1 startup vector for BMIPS4350
* *
...@@ -216,8 +214,6 @@ END(bmips_smp_int_vec) ...@@ -216,8 +214,6 @@ END(bmips_smp_int_vec)
* Certain CPUs support extending kseg0 to 1024MB. * Certain CPUs support extending kseg0 to 1024MB.
***********************************************************************/ ***********************************************************************/
__CPUINIT
LEAF(bmips_enable_xks01) LEAF(bmips_enable_xks01)
#if defined(CONFIG_XKS01) #if defined(CONFIG_XKS01)
......
...@@ -109,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); ...@@ -109,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name); static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
void __cpuinit sb1480_clockevent_init(void) void sb1480_clockevent_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
......
...@@ -59,7 +59,7 @@ void gic_event_handler(struct clock_event_device *dev) ...@@ -59,7 +59,7 @@ void gic_event_handler(struct clock_event_device *dev)
{ {
} }
int __cpuinit gic_clockevent_init(void) int gic_clockevent_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct clock_event_device *cd; struct clock_event_device *cd;
......
...@@ -171,7 +171,7 @@ int c0_compare_int_usable(void) ...@@ -171,7 +171,7 @@ int c0_compare_int_usable(void)
} }
#ifndef CONFIG_MIPS_MT_SMTC #ifndef CONFIG_MIPS_MT_SMTC
int __cpuinit r4k_clockevent_init(void) int r4k_clockevent_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct clock_event_device *cd; struct clock_event_device *cd;
......
...@@ -107,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); ...@@ -107,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name); static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
void __cpuinit sb1250_clockevent_init(void) void sb1250_clockevent_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned int irq = K_INT_TIMER_0 + cpu; unsigned int irq = K_INT_TIMER_0 + cpu;
......
...@@ -248,7 +248,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) ...@@ -248,7 +248,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
} }
int __cpuinit smtc_clockevent_init(void) int smtc_clockevent_init(void)
{ {
uint64_t mips_freq = mips_hpt_frequency; uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
......
...@@ -168,7 +168,7 @@ static inline void check_mult_sh(void) ...@@ -168,7 +168,7 @@ static inline void check_mult_sh(void)
panic(bug64hit, !R4000_WAR ? r4kwar : nowar); panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
} }
static volatile int daddi_ov __cpuinitdata; static volatile int daddi_ov;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs) asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{ {
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include <asm/spram.h> #include <asm/spram.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
static int __cpuinitdata mips_fpu_disabled; static int mips_fpu_disabled;
static int __init fpu_disable(char *s) static int __init fpu_disable(char *s)
{ {
...@@ -39,7 +39,7 @@ static int __init fpu_disable(char *s) ...@@ -39,7 +39,7 @@ static int __init fpu_disable(char *s)
__setup("nofpu", fpu_disable); __setup("nofpu", fpu_disable);
int __cpuinitdata mips_dsp_disabled; int mips_dsp_disabled;
static int __init dsp_disable(char *s) static int __init dsp_disable(char *s)
{ {
...@@ -134,7 +134,7 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) ...@@ -134,7 +134,7 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
#endif #endif
} }
static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
{ {
switch (isa) { switch (isa) {
case MIPS_CPU_ISA_M64R2: case MIPS_CPU_ISA_M64R2:
...@@ -159,7 +159,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) ...@@ -159,7 +159,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
} }
} }
static char unknown_isa[] __cpuinitdata = KERN_ERR \ static char unknown_isa[] = KERN_ERR \
"Unsupported ISA type, c0.config0: %d."; "Unsupported ISA type, c0.config0: %d.";
static inline unsigned int decode_config0(struct cpuinfo_mips *c) static inline unsigned int decode_config0(struct cpuinfo_mips *c)
...@@ -290,7 +290,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) ...@@ -290,7 +290,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
return config4 & MIPS_CONF_M; return config4 & MIPS_CONF_M;
} }
static void __cpuinit decode_configs(struct cpuinfo_mips *c) static void decode_configs(struct cpuinfo_mips *c)
{ {
int ok; int ok;
...@@ -962,7 +962,7 @@ EXPORT_SYMBOL(__ua_limit); ...@@ -962,7 +962,7 @@ EXPORT_SYMBOL(__ua_limit);
const char *__cpu_name[NR_CPUS]; const char *__cpu_name[NR_CPUS];
const char *__elf_platform; const char *__elf_platform;
__cpuinit void cpu_probe(void) void cpu_probe(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
...@@ -1047,7 +1047,7 @@ __cpuinit void cpu_probe(void) ...@@ -1047,7 +1047,7 @@ __cpuinit void cpu_probe(void)
#endif #endif
} }
__cpuinit void cpu_report(void) void cpu_report(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
......
...@@ -158,8 +158,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point ...@@ -158,8 +158,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
j start_kernel j start_kernel
END(kernel_entry) END(kernel_entry)
__CPUINIT
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
* SMP slave cpus entry point. Board specific code for bootstrap calls this * SMP slave cpus entry point. Board specific code for bootstrap calls this
...@@ -188,5 +186,3 @@ NESTED(smp_bootstrap, 16, sp) ...@@ -188,5 +186,3 @@ NESTED(smp_bootstrap, 16, sp)
j start_secondary j start_secondary
END(smp_bootstrap) END(smp_bootstrap)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
__FINIT
...@@ -398,7 +398,7 @@ struct plat_smp_ops bmips_smp_ops = { ...@@ -398,7 +398,7 @@ struct plat_smp_ops bmips_smp_ops = {
* UP BMIPS systems as well. * UP BMIPS systems as well.
***********************************************************************/ ***********************************************************************/
static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) static void bmips_wr_vec(unsigned long dst, char *start, char *end)
{ {
memcpy((void *)dst, start, end - start); memcpy((void *)dst, start, end - start);
dma_cache_wback((unsigned long)start, end - start); dma_cache_wback((unsigned long)start, end - start);
...@@ -406,7 +406,7 @@ static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) ...@@ -406,7 +406,7 @@ static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
instruction_hazard(); instruction_hazard();
} }
static inline void __cpuinit bmips_nmi_handler_setup(void) static inline void bmips_nmi_handler_setup(void)
{ {
bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
&bmips_reset_nmi_vec_end); &bmips_reset_nmi_vec_end);
...@@ -414,7 +414,7 @@ static inline void __cpuinit bmips_nmi_handler_setup(void) ...@@ -414,7 +414,7 @@ static inline void __cpuinit bmips_nmi_handler_setup(void)
&bmips_smp_int_vec_end); &bmips_smp_int_vec_end);
} }
void __cpuinit bmips_ebase_setup(void) void bmips_ebase_setup(void)
{ {
unsigned long new_ebase = ebase; unsigned long new_ebase = ebase;
void __iomem __maybe_unused *cbr; void __iomem __maybe_unused *cbr;
......
...@@ -149,7 +149,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) ...@@ -149,7 +149,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
vsmp_send_ipi_single(i, action); vsmp_send_ipi_single(i, action);
} }
static void __cpuinit vsmp_init_secondary(void) static void vsmp_init_secondary(void)
{ {
#ifdef CONFIG_IRQ_GIC #ifdef CONFIG_IRQ_GIC
/* This is Malta specific: IPI,performance and timer interrupts */ /* This is Malta specific: IPI,performance and timer interrupts */
...@@ -162,7 +162,7 @@ static void __cpuinit vsmp_init_secondary(void) ...@@ -162,7 +162,7 @@ static void __cpuinit vsmp_init_secondary(void)
STATUSF_IP6 | STATUSF_IP7); STATUSF_IP6 | STATUSF_IP7);
} }
static void __cpuinit vsmp_smp_finish(void) static void vsmp_smp_finish(void)
{ {
/* CDFIXME: remove this? */ /* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
...@@ -188,7 +188,7 @@ static void vsmp_cpus_done(void) ...@@ -188,7 +188,7 @@ static void vsmp_cpus_done(void)
* (unsigned long)idle->thread_info the gp * (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE * assumes a 1:1 mapping of TC => VPE
*/ */
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
{ {
struct thread_info *gp = task_thread_info(idle); struct thread_info *gp = task_thread_info(idle);
dvpe(); dvpe();
......
...@@ -28,11 +28,11 @@ static inline void up_send_ipi_mask(const struct cpumask *mask, ...@@ -28,11 +28,11 @@ static inline void up_send_ipi_mask(const struct cpumask *mask,
* After we've done initial boot, this function is called to allow the * After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed * board code to clean up state, if needed
*/ */
static void __cpuinit up_init_secondary(void) static void up_init_secondary(void)
{ {
} }
static void __cpuinit up_smp_finish(void) static void up_smp_finish(void)
{ {
} }
...@@ -44,7 +44,7 @@ static void up_cpus_done(void) ...@@ -44,7 +44,7 @@ static void up_cpus_done(void)
/* /*
* Firmware CPU startup hook * Firmware CPU startup hook
*/ */
static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) static void up_boot_secondary(int cpu, struct task_struct *idle)
{ {
} }
......
...@@ -86,7 +86,7 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -86,7 +86,7 @@ static inline void set_cpu_sibling_map(int cpu)
struct plat_smp_ops *mp_ops; struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops); EXPORT_SYMBOL(mp_ops);
__cpuinit void register_smp_ops(struct plat_smp_ops *ops) void register_smp_ops(struct plat_smp_ops *ops)
{ {
if (mp_ops) if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n"); printk(KERN_WARNING "Overriding previously set SMP ops\n");
...@@ -98,7 +98,7 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops) ...@@ -98,7 +98,7 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
* First C code run on the secondary CPUs after being started up by * First C code run on the secondary CPUs after being started up by
* the master. * the master.
*/ */
asmlinkage __cpuinit void start_secondary(void) asmlinkage void start_secondary(void)
{ {
unsigned int cpu; unsigned int cpu;
...@@ -197,7 +197,7 @@ void smp_prepare_boot_cpu(void) ...@@ -197,7 +197,7 @@ void smp_prepare_boot_cpu(void)
cpu_set(0, cpu_callin_map); cpu_set(0, cpu_callin_map);
} }
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{ {
mp_ops->boot_secondary(cpu, tidle); mp_ops->boot_secondary(cpu, tidle);
......
...@@ -645,7 +645,7 @@ void smtc_prepare_cpus(int cpus) ...@@ -645,7 +645,7 @@ void smtc_prepare_cpus(int cpus)
* (unsigned long)idle->thread_info the gp * (unsigned long)idle->thread_info the gp
* *
*/ */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) void smtc_boot_secondary(int cpu, struct task_struct *idle)
{ {
extern u32 kernelsp[NR_CPUS]; extern u32 kernelsp[NR_CPUS];
unsigned long flags; unsigned long flags;
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
/* /*
* Different semantics to the set_c0_* function built by __BUILD_SET_C0 * Different semantics to the set_c0_* function built by __BUILD_SET_C0
*/ */
static __cpuinit unsigned int bis_c0_errctl(unsigned int set) static unsigned int bis_c0_errctl(unsigned int set)
{ {
unsigned int res; unsigned int res;
res = read_c0_errctl(); res = read_c0_errctl();
...@@ -45,7 +45,7 @@ static __cpuinit unsigned int bis_c0_errctl(unsigned int set) ...@@ -45,7 +45,7 @@ static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
return res; return res;
} }
static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data) static void ispram_store_tag(unsigned int offset, unsigned int data)
{ {
unsigned int errctl; unsigned int errctl;
...@@ -64,7 +64,7 @@ static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data) ...@@ -64,7 +64,7 @@ static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
} }
static __cpuinit unsigned int ispram_load_tag(unsigned int offset) static unsigned int ispram_load_tag(unsigned int offset)
{ {
unsigned int data; unsigned int data;
unsigned int errctl; unsigned int errctl;
...@@ -82,7 +82,7 @@ static __cpuinit unsigned int ispram_load_tag(unsigned int offset) ...@@ -82,7 +82,7 @@ static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
return data; return data;
} }
static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data) static void dspram_store_tag(unsigned int offset, unsigned int data)
{ {
unsigned int errctl; unsigned int errctl;
...@@ -98,7 +98,7 @@ static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data) ...@@ -98,7 +98,7 @@ static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
} }
static __cpuinit unsigned int dspram_load_tag(unsigned int offset) static unsigned int dspram_load_tag(unsigned int offset)
{ {
unsigned int data; unsigned int data;
unsigned int errctl; unsigned int errctl;
...@@ -115,7 +115,7 @@ static __cpuinit unsigned int dspram_load_tag(unsigned int offset) ...@@ -115,7 +115,7 @@ static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
return data; return data;
} }
static __cpuinit void probe_spram(char *type, static void probe_spram(char *type,
unsigned int base, unsigned int base,
unsigned int (*read)(unsigned int), unsigned int (*read)(unsigned int),
void (*write)(unsigned int, unsigned int)) void (*write)(unsigned int, unsigned int))
...@@ -196,7 +196,7 @@ static __cpuinit void probe_spram(char *type, ...@@ -196,7 +196,7 @@ static __cpuinit void probe_spram(char *type,
offset += 2 * SPRAM_TAG_STRIDE; offset += 2 * SPRAM_TAG_STRIDE;
} }
} }
void __cpuinit spram_config(void) void spram_config(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config0; unsigned int config0;
......
...@@ -20,15 +20,15 @@ ...@@ -20,15 +20,15 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0); static atomic_t count_start_flag = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0); static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0); static atomic_t count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); static atomic_t count_reference = ATOMIC_INIT(0);
#define COUNTON 100 #define COUNTON 100
#define NR_LOOPS 5 #define NR_LOOPS 5
void __cpuinit synchronise_count_master(int cpu) void synchronise_count_master(int cpu)
{ {
int i; int i;
unsigned long flags; unsigned long flags;
...@@ -106,7 +106,7 @@ void __cpuinit synchronise_count_master(int cpu) ...@@ -106,7 +106,7 @@ void __cpuinit synchronise_count_master(int cpu)
printk("done.\n"); printk("done.\n");
} }
void __cpuinit synchronise_count_slave(int cpu) void synchronise_count_slave(int cpu)
{ {
int i; int i;
unsigned int initcount; unsigned int initcount;
......
...@@ -90,7 +90,7 @@ void (*board_nmi_handler_setup)(void); ...@@ -90,7 +90,7 @@ void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void); void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void); void (*board_ebase_setup)(void);
void __cpuinitdata(*board_cache_error_setup)(void); void(*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29) static void show_raw_backtrace(unsigned long reg29)
{ {
...@@ -1682,7 +1682,7 @@ int cp0_compare_irq_shift; ...@@ -1682,7 +1682,7 @@ int cp0_compare_irq_shift;
int cp0_perfcount_irq; int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq); EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
static int __cpuinitdata noulri; static int noulri;
static int __init ulri_disable(char *s) static int __init ulri_disable(char *s)
{ {
...@@ -1693,7 +1693,7 @@ static int __init ulri_disable(char *s) ...@@ -1693,7 +1693,7 @@ static int __init ulri_disable(char *s)
} }
__setup("noulri", ulri_disable); __setup("noulri", ulri_disable);
void __cpuinit per_cpu_trap_init(bool is_boot_cpu) void per_cpu_trap_init(bool is_boot_cpu)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0; unsigned int status_set = ST0_CU0;
...@@ -1810,7 +1810,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) ...@@ -1810,7 +1810,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
} }
/* Install CPU exception handler */ /* Install CPU exception handler */
void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) void set_handler(unsigned long offset, void *addr, unsigned long size)
{ {
#ifdef CONFIG_CPU_MICROMIPS #ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
...@@ -1820,7 +1820,7 @@ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) ...@@ -1820,7 +1820,7 @@ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
local_flush_icache_range(ebase + offset, ebase + offset + size); local_flush_icache_range(ebase + offset, ebase + offset + size);
} }
static char panic_null_cerr[] __cpuinitdata = static char panic_null_cerr[] =
"Trying to set NULL cache error exception handler"; "Trying to set NULL cache error exception handler";
/* /*
...@@ -1828,7 +1828,7 @@ static char panic_null_cerr[] __cpuinitdata = ...@@ -1828,7 +1828,7 @@ static char panic_null_cerr[] __cpuinitdata =
* This is suitable only for the cache error exception which is the only * This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached. * exception handler that is being run uncached.
*/ */
void __cpuinit set_uncached_handler(unsigned long offset, void *addr, void set_uncached_handler(unsigned long offset, void *addr,
unsigned long size) unsigned long size)
{ {
unsigned long uncached_ebase = CKSEG1ADDR(ebase); unsigned long uncached_ebase = CKSEG1ADDR(ebase);
......
...@@ -100,7 +100,7 @@ void mips_clear_watch_registers(void) ...@@ -100,7 +100,7 @@ void mips_clear_watch_registers(void)
} }
} }
__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) void mips_probe_watch_registers(struct cpuinfo_mips *c)
{ {
unsigned int t; unsigned int t;
......
...@@ -461,7 +461,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) ...@@ -461,7 +461,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
return 0; return 0;
} }
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
return MIPS_CPU_TIMER_IRQ; return MIPS_CPU_TIMER_IRQ;
} }
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
* values, so we can avoid sharing the same stack area between a cached * values, so we can avoid sharing the same stack area between a cached
* and the uncached mode. * and the uncached mode.
*/ */
unsigned long __cpuinit run_uncached(void *func) unsigned long run_uncached(void *func)
{ {
register long sp __asm__("$sp"); register long sp __asm__("$sp");
register long ret __asm__("$2"); register long ret __asm__("$2");
......
...@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size) ...@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
* Probe Octeon's caches * Probe Octeon's caches
* *
*/ */
static void __cpuinit probe_octeon(void) static void probe_octeon(void)
{ {
unsigned long icache_size; unsigned long icache_size;
unsigned long dcache_size; unsigned long dcache_size;
...@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void) ...@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void)
} }
} }
static void __cpuinit octeon_cache_error_setup(void) static void octeon_cache_error_setup(void)
{ {
extern char except_vec2_octeon; extern char except_vec2_octeon;
set_handler(0x100, &except_vec2_octeon, 0x80); set_handler(0x100, &except_vec2_octeon, 0x80);
...@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void) ...@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void)
* Setup the Octeon cache flush routines * Setup the Octeon cache flush routines
* *
*/ */
void __cpuinit octeon_cache_init(void) void octeon_cache_init(void)
{ {
probe_octeon(); probe_octeon();
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
static unsigned long icache_size, dcache_size; /* Size in bytes */ static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */ static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags) unsigned long r3k_cache_size(unsigned long ca_flags)
{ {
unsigned long flags, status, dummy, size; unsigned long flags, status, dummy, size;
volatile unsigned long *p; volatile unsigned long *p;
...@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags) ...@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
return size * sizeof(*p); return size * sizeof(*p);
} }
unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags) unsigned long r3k_cache_lsize(unsigned long ca_flags)
{ {
unsigned long flags, status, lsize, i; unsigned long flags, status, lsize, i;
volatile unsigned long *p; volatile unsigned long *p;
...@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags) ...@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
return lsize * sizeof(*p); return lsize * sizeof(*p);
} }
static void __cpuinit r3k_probe_cache(void) static void r3k_probe_cache(void)
{ {
dcache_size = r3k_cache_size(ST0_ISC); dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size) if (dcache_size)
...@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size) ...@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
r3k_flush_dcache_range(start, start + size); r3k_flush_dcache_range(start, start + size);
} }
void __cpuinit r3k_cache_init(void) void r3k_cache_init(void)
{ {
extern void build_clear_page(void); extern void build_clear_page(void);
extern void build_copy_page(void); extern void build_copy_page(void);
......
...@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr) ...@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
blast_dcache64_page(addr); blast_dcache64_page(addr);
} }
static void __cpuinit r4k_blast_dcache_page_setup(void) static void r4k_blast_dcache_page_setup(void)
{ {
unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size();
...@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void) ...@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) static void r4k_blast_dcache_page_indexed_setup(void)
{ {
unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size();
...@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) ...@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
void (* r4k_blast_dcache)(void); void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache); EXPORT_SYMBOL(r4k_blast_dcache);
static void __cpuinit r4k_blast_dcache_setup(void) static void r4k_blast_dcache_setup(void)
{ {
unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size();
...@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page) ...@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
static void (* r4k_blast_icache_page)(unsigned long addr); static void (* r4k_blast_icache_page)(unsigned long addr);
static void __cpuinit r4k_blast_icache_page_setup(void) static void r4k_blast_icache_page_setup(void)
{ {
unsigned long ic_lsize = cpu_icache_line_size(); unsigned long ic_lsize = cpu_icache_line_size();
...@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void) ...@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void)
static void (* r4k_blast_icache_page_indexed)(unsigned long addr); static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
static void __cpuinit r4k_blast_icache_page_indexed_setup(void) static void r4k_blast_icache_page_indexed_setup(void)
{ {
unsigned long ic_lsize = cpu_icache_line_size(); unsigned long ic_lsize = cpu_icache_line_size();
...@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void) ...@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
void (* r4k_blast_icache)(void); void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache); EXPORT_SYMBOL(r4k_blast_icache);
static void __cpuinit r4k_blast_icache_setup(void) static void r4k_blast_icache_setup(void)
{ {
unsigned long ic_lsize = cpu_icache_line_size(); unsigned long ic_lsize = cpu_icache_line_size();
...@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void) ...@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void)
static void (* r4k_blast_scache_page)(unsigned long addr); static void (* r4k_blast_scache_page)(unsigned long addr);
static void __cpuinit r4k_blast_scache_page_setup(void) static void r4k_blast_scache_page_setup(void)
{ {
unsigned long sc_lsize = cpu_scache_line_size(); unsigned long sc_lsize = cpu_scache_line_size();
...@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void) ...@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void)
static void (* r4k_blast_scache_page_indexed)(unsigned long addr); static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
static void __cpuinit r4k_blast_scache_page_indexed_setup(void) static void r4k_blast_scache_page_indexed_setup(void)
{ {
unsigned long sc_lsize = cpu_scache_line_size(); unsigned long sc_lsize = cpu_scache_line_size();
...@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void) ...@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
static void (* r4k_blast_scache)(void); static void (* r4k_blast_scache)(void);
static void __cpuinit r4k_blast_scache_setup(void) static void r4k_blast_scache_setup(void)
{ {
unsigned long sc_lsize = cpu_scache_line_size(); unsigned long sc_lsize = cpu_scache_line_size();
...@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c) ...@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
} }
} }
static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way", static char *way_string[] = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way" "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
}; };
static void __cpuinit probe_pcache(void) static void probe_pcache(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
...@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void) ...@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void)
* executes in KSEG1 space or else you will crash and burn badly. You have * executes in KSEG1 space or else you will crash and burn badly. You have
* been warned. * been warned.
*/ */
static int __cpuinit probe_scache(void) static int probe_scache(void)
{ {
unsigned long flags, addr, begin, end, pow2; unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
...@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void); ...@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void);
extern int rm7k_sc_init(void); extern int rm7k_sc_init(void);
extern int mips_sc_init(void); extern int mips_sc_init(void);
static void __cpuinit setup_scache(void) static void setup_scache(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
...@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void) ...@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void)
NXP_BARRIER(); NXP_BARRIER();
} }
static int __cpuinitdata cca = -1; static int cca = -1;
static int __init cca_setup(char *str) static int __init cca_setup(char *str)
{ {
...@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str) ...@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str)
early_param("cca", cca_setup); early_param("cca", cca_setup);
static void __cpuinit coherency_setup(void) static void coherency_setup(void)
{ {
if (cca < 0 || cca > 7) if (cca < 0 || cca > 7)
cca = read_c0_config() & CONF_CM_CMASK; cca = read_c0_config() & CONF_CM_CMASK;
...@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void) ...@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void)
} }
} }
static void __cpuinit r4k_cache_error_setup(void) static void r4k_cache_error_setup(void)
{ {
extern char __weak except_vec2_generic; extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1; extern char __weak except_vec2_sb1;
...@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void) ...@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void)
} }
} }
void __cpuinit r4k_cache_init(void) void r4k_cache_init(void)
{ {
extern void build_clear_page(void); extern void build_clear_page(void);
extern void build_copy_page(void); extern void build_copy_page(void);
......
...@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void) ...@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void)
} }
} }
void __cpuinit tx39_cache_init(void) void tx39_cache_init(void)
{ {
extern void build_clear_page(void); extern void build_clear_page(void);
extern void build_copy_page(void); extern void build_copy_page(void);
......
...@@ -182,7 +182,7 @@ static inline void setup_protection_map(void) ...@@ -182,7 +182,7 @@ static inline void setup_protection_map(void)
} }
} }
void __cpuinit cpu_cache_init(void) void cpu_cache_init(void)
{ {
if (cpu_has_3k_cache) { if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void); extern void __weak r3k_cache_init(void);
......
...@@ -49,8 +49,6 @@ ...@@ -49,8 +49,6 @@
* (0x170-0x17f) are used to preserve k0, k1, and ra. * (0x170-0x17f) are used to preserve k0, k1, and ra.
*/ */
__CPUINIT
LEAF(except_vec2_sb1) LEAF(except_vec2_sb1)
/* /*
* If this error is recoverable, we need to exit the handler * If this error is recoverable, we need to exit the handler
...@@ -142,8 +140,6 @@ unrecoverable: ...@@ -142,8 +140,6 @@ unrecoverable:
END(except_vec2_sb1) END(except_vec2_sb1)
__FINIT
LEAF(handle_vec2_sb1) LEAF(handle_vec2_sb1)
mfc0 k0,CP0_CONFIG mfc0 k0,CP0_CONFIG
li k1,~CONF_CM_CMASK li k1,~CONF_CM_CMASK
......
...@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both) ...@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store) UASM_L_LA(_copy_pref_store)
/* We need one branch and therefore one relocation per target label. */ /* We need one branch and therefore one relocation per target label. */
static struct uasm_label __cpuinitdata labels[5]; static struct uasm_label labels[5];
static struct uasm_reloc __cpuinitdata relocs[5]; static struct uasm_reloc relocs[5];
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
static int pref_bias_clear_store __cpuinitdata; static int pref_bias_clear_store;
static int pref_bias_copy_load __cpuinitdata; static int pref_bias_copy_load;
static int pref_bias_copy_store __cpuinitdata; static int pref_bias_copy_store;
static u32 pref_src_mode __cpuinitdata; static u32 pref_src_mode;
static u32 pref_dst_mode __cpuinitdata; static u32 pref_dst_mode;
static int clear_word_size __cpuinitdata; static int clear_word_size;
static int copy_word_size __cpuinitdata; static int copy_word_size;
static int half_clear_loop_size __cpuinitdata; static int half_clear_loop_size;
static int half_copy_loop_size __cpuinitdata; static int half_copy_loop_size;
static int cache_line_size __cpuinitdata; static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1) #define cache_line_mask() (cache_line_size - 1)
static inline void __cpuinit static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{ {
if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
...@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) ...@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
} }
} }
static void __cpuinit set_prefetch_parameters(void) static void set_prefetch_parameters(void)
{ {
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
clear_word_size = 8; clear_word_size = 8;
...@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void) ...@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
4 * copy_word_size)); 4 * copy_word_size));
} }
static void __cpuinit build_clear_store(u32 **buf, int off) static void build_clear_store(u32 **buf, int off)
{ {
if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) { if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
uasm_i_sd(buf, ZERO, off, A0); uasm_i_sd(buf, ZERO, off, A0);
...@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off) ...@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
} }
} }
static inline void __cpuinit build_clear_pref(u32 **buf, int off) static inline void build_clear_pref(u32 **buf, int off)
{ {
if (off & cache_line_mask()) if (off & cache_line_mask())
return; return;
...@@ -240,7 +240,7 @@ extern u32 __clear_page_end; ...@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
extern u32 __copy_page_start; extern u32 __copy_page_start;
extern u32 __copy_page_end; extern u32 __copy_page_end;
void __cpuinit build_clear_page(void) void build_clear_page(void)
{ {
int off; int off;
u32 *buf = &__clear_page_start; u32 *buf = &__clear_page_start;
...@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void) ...@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
pr_debug("\t.set pop\n"); pr_debug("\t.set pop\n");
} }
static void __cpuinit build_copy_load(u32 **buf, int reg, int off) static void build_copy_load(u32 **buf, int reg, int off)
{ {
if (cpu_has_64bit_gp_regs) { if (cpu_has_64bit_gp_regs) {
uasm_i_ld(buf, reg, off, A1); uasm_i_ld(buf, reg, off, A1);
...@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off) ...@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
} }
} }
static void __cpuinit build_copy_store(u32 **buf, int reg, int off) static void build_copy_store(u32 **buf, int reg, int off)
{ {
if (cpu_has_64bit_gp_regs) { if (cpu_has_64bit_gp_regs) {
uasm_i_sd(buf, reg, off, A0); uasm_i_sd(buf, reg, off, A0);
...@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) ...@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
} }
} }
void __cpuinit build_copy_page(void) void build_copy_page(void)
{ {
int off; int off;
u32 *buf = &__copy_page_start; u32 *buf = &__copy_page_start;
......
...@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = { ...@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = {
.bc_inv = indy_sc_wback_invalidate .bc_inv = indy_sc_wback_invalidate
}; };
void __cpuinit indy_sc_init(void) void indy_sc_init(void)
{ {
if (indy_sc_probe()) { if (indy_sc_probe()) {
indy_sc_enable(); indy_sc_enable();
......
...@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void) ...@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void)
return 1; return 1;
} }
int __cpuinit mips_sc_init(void) int mips_sc_init(void)
{ {
int found = mips_sc_probe(); int found = mips_sc_probe();
if (found) { if (found) {
......
...@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = { ...@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = {
.bc_inv = r5k_dma_cache_inv_sc .bc_inv = r5k_dma_cache_inv_sc
}; };
void __cpuinit r5k_sc_init(void) void r5k_sc_init(void)
{ {
if (r5k_sc_probe()) { if (r5k_sc_probe()) {
r5k_sc_enable(); r5k_sc_enable();
......
...@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void) ...@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void)
/* /*
* This function is executed in uncached address space. * This function is executed in uncached address space.
*/ */
static __cpuinit void __rm7k_tc_enable(void) static void __rm7k_tc_enable(void)
{ {
int i; int i;
...@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void) ...@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void)
cache_op(Index_Store_Tag_T, CKSEG0ADDR(i)); cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
} }
static __cpuinit void rm7k_tc_enable(void) static void rm7k_tc_enable(void)
{ {
if (read_c0_config() & RM7K_CONF_TE) if (read_c0_config() & RM7K_CONF_TE)
return; return;
...@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void) ...@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void)
/* /*
* This function is executed in uncached address space. * This function is executed in uncached address space.
*/ */
static __cpuinit void __rm7k_sc_enable(void) static void __rm7k_sc_enable(void)
{ {
int i; int i;
...@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void) ...@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void)
cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i)); cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
} }
static __cpuinit void rm7k_sc_enable(void) static void rm7k_sc_enable(void)
{ {
if (read_c0_config() & RM7K_CONF_SE) if (read_c0_config() & RM7K_CONF_SE)
return; return;
...@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = { ...@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = {
* This is a probing function like the one found in c-r4k.c, we look for the * This is a probing function like the one found in c-r4k.c, we look for the
* wrap around point with different addresses. * wrap around point with different addresses.
*/ */
static __cpuinit void __probe_tcache(void) static void __probe_tcache(void)
{ {
unsigned long flags, addr, begin, end, pow2; unsigned long flags, addr, begin, end, pow2;
...@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void) ...@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void)
local_irq_restore(flags); local_irq_restore(flags);
} }
void __cpuinit rm7k_sc_init(void) void rm7k_sc_init(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
......
...@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, ...@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
} }
} }
void __cpuinit tlb_init(void) void tlb_init(void)
{ {
local_flush_tlb_all(); local_flush_tlb_all();
......
...@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void) ...@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static int __cpuinitdata ntlb; static int ntlb;
static int __init set_ntlb(char *str) static int __init set_ntlb(char *str)
{ {
get_option(&str, &ntlb); get_option(&str, &ntlb);
...@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str) ...@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb); __setup("ntlb=", set_ntlb);
void __cpuinit tlb_init(void) void tlb_init(void)
{ {
/* /*
* You should never change this register: * You should never change this register:
......
...@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ...@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_restore(flags); local_irq_restore(flags);
} }
static void __cpuinit probe_tlb(unsigned long config) static void probe_tlb(unsigned long config)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
c->tlbsize = 3 * 128; /* 3 sets each 128 entries */ c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
} }
void __cpuinit tlb_init(void) void tlb_init(void)
{ {
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
unsigned long status; unsigned long status;
......
...@@ -136,7 +136,7 @@ static int scratchpad_offset(int i) ...@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
* why; it's not an issue caused by the core RTL. * why; it's not an issue caused by the core RTL.
* *
*/ */
static int __cpuinit m4kc_tlbp_war(void) static int m4kc_tlbp_war(void)
{ {
return (current_cpu_data.processor_id & 0xffff00) == return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC); (PRID_COMP_MIPS | PRID_IMP_4KC);
...@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault) ...@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
UASM_L_LA(_tlb_huge_update) UASM_L_LA(_tlb_huge_update)
#endif #endif
static int __cpuinitdata hazard_instance; static int hazard_instance;
static void __cpuinit uasm_bgezl_hazard(u32 **p, static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
struct uasm_reloc **r,
int instance)
{ {
switch (instance) { switch (instance) {
case 0 ... 7: case 0 ... 7:
...@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p, ...@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
} }
} }
static void __cpuinit uasm_bgezl_label(struct uasm_label **l, static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
u32 **p,
int instance)
{ {
switch (instance) { switch (instance) {
case 0 ... 7: case 0 ... 7:
...@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun ...@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
* We deliberately chose a buffer size of 128, so we won't scribble * We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic. * over anything important on overflow before we panic.
*/ */
static u32 tlb_handler[128] __cpuinitdata; static u32 tlb_handler[128];
/* simply assume worst case size for labels and relocs */ /* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata; static struct uasm_label labels[128];
static struct uasm_reloc relocs[128] __cpuinitdata; static struct uasm_reloc relocs[128];
static int check_for_high_segbits __cpuinitdata; static int check_for_high_segbits;
static unsigned int kscratch_used_mask __cpuinitdata; static unsigned int kscratch_used_mask;
static inline int __maybe_unused c0_kscratch(void) static inline int __maybe_unused c0_kscratch(void)
{ {
...@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void) ...@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
} }
} }
static int __cpuinit allocate_kscratch(void) static int allocate_kscratch(void)
{ {
int r; int r;
unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
...@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void) ...@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
return r; return r;
} }
static int scratch_reg __cpuinitdata; static int scratch_reg;
static int pgd_reg __cpuinitdata; static int pgd_reg;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
static struct work_registers __cpuinit build_get_work_registers(u32 **p) static struct work_registers build_get_work_registers(u32 **p)
{ {
struct work_registers r; struct work_registers r;
...@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p) ...@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
return r; return r;
} }
static void __cpuinit build_restore_work_registers(u32 **p) static void build_restore_work_registers(u32 **p)
{ {
if (scratch_reg >= 0) { if (scratch_reg >= 0) {
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
...@@ -418,7 +414,7 @@ extern unsigned long pgd_current[]; ...@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
/* /*
* The R3000 TLB handler is simple. * The R3000 TLB handler is simple.
*/ */
static void __cpuinit build_r3000_tlb_refill_handler(void) static void build_r3000_tlb_refill_handler(void)
{ {
long pgdc = (long)pgd_current; long pgdc = (long)pgd_current;
u32 *p; u32 *p;
...@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void) ...@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
* other one.To keep things simple, we first assume linear space, * other one.To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed. * then we relocate it to the final handler layout as needed.
*/ */
static u32 final_handler[64] __cpuinitdata; static u32 final_handler[64];
/* /*
* Hazards * Hazards
...@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata; ...@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
* *
* As if we MIPS hackers wouldn't know how to nop pipelines happy ... * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/ */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) static void __maybe_unused build_tlb_probe_entry(u32 **p)
{ {
switch (current_cpu_type()) { switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0/R4700 needs this, too. */ /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
...@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) ...@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
*/ */
enum tlb_write_entry { tlb_random, tlb_indexed }; enum tlb_write_entry { tlb_random, tlb_indexed };
static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, struct uasm_reloc **r,
enum tlb_write_entry wmode) enum tlb_write_entry wmode)
{ {
void(*tlbw)(u32 **) = NULL; void(*tlbw)(u32 **) = NULL;
...@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, ...@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
} }
} }
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
unsigned int reg) unsigned int reg)
{ {
if (cpu_has_rixi) { if (cpu_has_rixi) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
...@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, ...@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static __cpuinit void build_restore_pagemask(u32 **p, static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
struct uasm_reloc **r, unsigned int tmp, enum label_id lid,
unsigned int tmp, int restore_scratch)
enum label_id lid,
int restore_scratch)
{ {
if (restore_scratch) { if (restore_scratch) {
/* Reset default page size */ /* Reset default page size */
...@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p, ...@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
} }
} }
static __cpuinit void build_huge_tlb_write_entry(u32 **p, static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_label **l, struct uasm_reloc **r,
struct uasm_reloc **r, unsigned int tmp,
unsigned int tmp, enum tlb_write_entry wmode,
enum tlb_write_entry wmode, int restore_scratch)
int restore_scratch)
{ {
/* Set huge page tlb entry size */ /* Set huge page tlb entry size */
uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
...@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p, ...@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
/* /*
* Check if Huge PTE is present, if so then jump to LABEL. * Check if Huge PTE is present, if so then jump to LABEL.
*/ */
static void __cpuinit static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
unsigned int pmd, int lid) unsigned int pmd, int lid)
{ {
UASM_i_LW(p, tmp, 0, pmd); UASM_i_LW(p, tmp, 0, pmd);
if (use_bbit_insns()) { if (use_bbit_insns()) {
...@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, ...@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
} }
} }
static __cpuinit void build_huge_update_entries(u32 **p, static void build_huge_update_entries(u32 **p, unsigned int pte,
unsigned int pte, unsigned int tmp)
unsigned int tmp)
{ {
int small_sequence; int small_sequence;
...@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p, ...@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
} }
static __cpuinit void build_huge_handler_tail(u32 **p, static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_reloc **r, struct uasm_label **l,
struct uasm_label **l, unsigned int pte,
unsigned int pte, unsigned int ptr)
unsigned int ptr)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr); UASM_i_SC(p, pte, 0, ptr);
...@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p, ...@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
* TMP and PTR are scratch. * TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry. * TMP will be clobbered, PTR will hold the pmd entry.
*/ */
static void __cpuinit static void
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr) unsigned int tmp, unsigned int ptr)
{ {
...@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, ...@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* BVADDR is the faulting address, PTR is scratch. * BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc. * PTR will hold the pgd for vmalloc.
*/ */
static void __cpuinit static void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr, unsigned int bvaddr, unsigned int ptr,
enum vmalloc64_mode mode) enum vmalloc64_mode mode)
...@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, ...@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* TMP and PTR are scratch. * TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry. * TMP will be clobbered, PTR will hold the pgd entry.
*/ */
static void __cpuinit __maybe_unused static void __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{ {
long pgdc = (long)pgd_current; long pgdc = (long)pgd_current;
...@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) ...@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#endif /* !CONFIG_64BIT */ #endif /* !CONFIG_64BIT */
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) static void build_adjust_context(u32 **p, unsigned int ctx)
{ {
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
...@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) ...@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
uasm_i_andi(p, ctx, ctx, mask); uasm_i_andi(p, ctx, ctx, mask);
} }
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{ {
/* /*
* Bug workaround for the Nevada. It seems as if under certain * Bug workaround for the Nevada. It seems as if under certain
...@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr ...@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
} }
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
unsigned int ptep)
{ {
/* /*
* 64bit address support (36bit on a 32bit CPU) in a 32bit * 64bit address support (36bit on a 32bit CPU) in a 32bit
...@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info { ...@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
int restore_scratch; int restore_scratch;
}; };
static struct mips_huge_tlb_info __cpuinit static struct mips_huge_tlb_info
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp, struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr, int c0_scratch_reg) unsigned int ptr, int c0_scratch_reg)
...@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, ...@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
*/ */
#define MIPS64_REFILL_INSNS 32 #define MIPS64_REFILL_INSNS 32
static void __cpuinit build_r4000_tlb_refill_handler(void) static void build_r4000_tlb_refill_handler(void)
{ {
u32 *p = tlb_handler; u32 *p = tlb_handler;
struct uasm_label *l = labels; struct uasm_label *l = labels;
...@@ -1462,7 +1452,7 @@ extern u32 handle_tlbm[], handle_tlbm_end[]; ...@@ -1462,7 +1452,7 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
static void __cpuinit build_r4000_setup_pgd(void) static void build_r4000_setup_pgd(void)
{ {
const int a0 = 4; const int a0 = 4;
const int a1 = 5; const int a1 = 5;
...@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void) ...@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
} }
#endif #endif
static void __cpuinit static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) ...@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
#endif #endif
} }
static void __cpuinit static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode) unsigned int mode)
{ {
...@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, ...@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
* the page table where this PTE is located, PTE will be re-loaded * the page table where this PTE is located, PTE will be re-loaded
* with it's original value. * with it's original value.
*/ */
static void __cpuinit static void
build_pte_present(u32 **p, struct uasm_reloc **r, build_pte_present(u32 **p, struct uasm_reloc **r,
int pte, int ptr, int scratch, enum label_id lid) int pte, int ptr, int scratch, enum label_id lid)
{ {
...@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r, ...@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
} }
/* Make PTE valid, store result in PTR. */ /* Make PTE valid, store result in PTR. */
static void __cpuinit static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr) unsigned int ptr)
{ {
...@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, ...@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be written to, if not branch to LABEL. Regardless * Check if PTE can be written to, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done. * restore PTE with value from PTR when done.
*/ */
static void __cpuinit static void
build_pte_writable(u32 **p, struct uasm_reloc **r, build_pte_writable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch, unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid) enum label_id lid)
...@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r, ...@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
/* Make PTE writable, update software status bits as well, then store /* Make PTE writable, update software status bits as well, then store
* at PTR. * at PTR.
*/ */
static void __cpuinit static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr) unsigned int ptr)
{ {
...@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, ...@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be modified, if not branch to LABEL. Regardless * Check if PTE can be modified, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done. * restore PTE with value from PTR when done.
*/ */
static void __cpuinit static void
build_pte_modifiable(u32 **p, struct uasm_reloc **r, build_pte_modifiable(u32 **p, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, int scratch, unsigned int pte, unsigned int ptr, int scratch,
enum label_id lid) enum label_id lid)
...@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r, ...@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
* This places the pte into ENTRYLO0 and writes it with tlbwi. * This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns. * Then it returns.
*/ */
static void __cpuinit static void
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{ {
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
...@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) ...@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
* may have the probe fail bit set as a result of a trap on a * may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns. * kseg2 access, i.e. without refill. Then it returns.
*/ */
static void __cpuinit static void
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte, struct uasm_reloc **r, unsigned int pte,
unsigned int tmp) unsigned int tmp)
...@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, ...@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
uasm_i_rfe(p); /* branch delay */ uasm_i_rfe(p); /* branch delay */
} }
static void __cpuinit static void
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr) unsigned int ptr)
{ {
...@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, ...@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
uasm_i_tlbp(p); /* load delay */ uasm_i_tlbp(p); /* load delay */
} }
static void __cpuinit build_r3000_tlb_load_handler(void) static void build_r3000_tlb_load_handler(void)
{ {
u32 *p = handle_tlbl; u32 *p = handle_tlbl;
const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
...@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void) ...@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
} }
static void __cpuinit build_r3000_tlb_store_handler(void) static void build_r3000_tlb_store_handler(void)
{ {
u32 *p = handle_tlbs; u32 *p = handle_tlbs;
const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
...@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void) ...@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
} }
static void __cpuinit build_r3000_tlb_modify_handler(void) static void build_r3000_tlb_modify_handler(void)
{ {
u32 *p = handle_tlbm; u32 *p = handle_tlbm;
const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
...@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) ...@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
/* /*
* R4000 style TLB load/store/modify handlers. * R4000 style TLB load/store/modify handlers.
*/ */
static struct work_registers __cpuinit static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r) struct uasm_reloc **r)
{ {
...@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, ...@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
return wr; return wr;
} }
static void __cpuinit static void
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp, struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr) unsigned int ptr)
...@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, ...@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
#endif #endif
} }
static void __cpuinit build_r4000_tlb_load_handler(void) static void build_r4000_tlb_load_handler(void)
{ {
u32 *p = handle_tlbl; u32 *p = handle_tlbl;
const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
...@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void) ...@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size); dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
} }
static void __cpuinit build_r4000_tlb_store_handler(void) static void build_r4000_tlb_store_handler(void)
{ {
u32 *p = handle_tlbs; u32 *p = handle_tlbs;
const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
...@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void) ...@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size); dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
} }
static void __cpuinit build_r4000_tlb_modify_handler(void) static void build_r4000_tlb_modify_handler(void)
{ {
u32 *p = handle_tlbm; u32 *p = handle_tlbm;
const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
...@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) ...@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size); dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
} }
static void __cpuinit flush_tlb_handlers(void) static void flush_tlb_handlers(void)
{ {
local_flush_icache_range((unsigned long)handle_tlbl, local_flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl_end); (unsigned long)handle_tlbl_end);
...@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void) ...@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void)
#endif #endif
} }
void __cpuinit build_tlb_refill_handler(void) void build_tlb_refill_handler(void)
{ {
/* /*
* The refill handler is generated per-CPU, multi-node systems * The refill handler is generated per-CPU, multi-node systems
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#include "uasm.c" #include "uasm.c"
static struct insn insn_table_MM[] __uasminitdata = { static struct insn insn_table_MM[] = {
{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
...@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = { ...@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = {
#undef M #undef M
static inline __uasminit u32 build_bimm(s32 arg) static inline u32 build_bimm(s32 arg)
{ {
WARN(arg > 0xffff || arg < -0x10000, WARN(arg > 0xffff || arg < -0x10000,
KERN_WARNING "Micro-assembler field overflow\n"); KERN_WARNING "Micro-assembler field overflow\n");
...@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg) ...@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
} }
static inline __uasminit u32 build_jimm(u32 arg) static inline u32 build_jimm(u32 arg)
{ {
WARN(arg & ~((JIMM_MASK << 2) | 1), WARN(arg & ~((JIMM_MASK << 2) | 1),
...@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg) ...@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
* The order of opcode arguments is implicitly left to right, * The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM. * starting with RS and ending with FUNC or IMM.
*/ */
static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) static void build_insn(u32 **buf, enum opcode opc, ...)
{ {
struct insn *ip = NULL; struct insn *ip = NULL;
unsigned int i; unsigned int i;
...@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) ...@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
(*buf)++; (*buf)++;
} }
static inline void __uasminit static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{ {
long laddr = (long)lab->addr; long laddr = (long)lab->addr;
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#include "uasm.c" #include "uasm.c"
static struct insn insn_table[] __uasminitdata = { static struct insn insn_table[] = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
...@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = { ...@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = {
#undef M #undef M
static inline __uasminit u32 build_bimm(s32 arg) static inline u32 build_bimm(s32 arg)
{ {
WARN(arg > 0x1ffff || arg < -0x20000, WARN(arg > 0x1ffff || arg < -0x20000,
KERN_WARNING "Micro-assembler field overflow\n"); KERN_WARNING "Micro-assembler field overflow\n");
...@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg) ...@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
} }
static inline __uasminit u32 build_jimm(u32 arg) static inline u32 build_jimm(u32 arg)
{ {
WARN(arg & ~(JIMM_MASK << 2), WARN(arg & ~(JIMM_MASK << 2),
KERN_WARNING "Micro-assembler field overflow\n"); KERN_WARNING "Micro-assembler field overflow\n");
...@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg) ...@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
* The order of opcode arguments is implicitly left to right, * The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM. * starting with RS and ending with FUNC or IMM.
*/ */
static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) static void build_insn(u32 **buf, enum opcode opc, ...)
{ {
struct insn *ip = NULL; struct insn *ip = NULL;
unsigned int i; unsigned int i;
...@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) ...@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
(*buf)++; (*buf)++;
} }
static inline void __uasminit static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{ {
long laddr = (long)lab->addr; long laddr = (long)lab->addr;
......
...@@ -63,35 +63,35 @@ struct insn { ...@@ -63,35 +63,35 @@ struct insn {
enum fields fields; enum fields fields;
}; };
static inline __uasminit u32 build_rs(u32 arg) static inline u32 build_rs(u32 arg)
{ {
WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RS_MASK) << RS_SH; return (arg & RS_MASK) << RS_SH;
} }
static inline __uasminit u32 build_rt(u32 arg) static inline u32 build_rt(u32 arg)
{ {
WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RT_MASK) << RT_SH; return (arg & RT_MASK) << RT_SH;
} }
static inline __uasminit u32 build_rd(u32 arg) static inline u32 build_rd(u32 arg)
{ {
WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RD_MASK) << RD_SH; return (arg & RD_MASK) << RD_SH;
} }
static inline __uasminit u32 build_re(u32 arg) static inline u32 build_re(u32 arg)
{ {
WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return (arg & RE_MASK) << RE_SH; return (arg & RE_MASK) << RE_SH;
} }
static inline __uasminit u32 build_simm(s32 arg) static inline u32 build_simm(s32 arg)
{ {
WARN(arg > 0x7fff || arg < -0x8000, WARN(arg > 0x7fff || arg < -0x8000,
KERN_WARNING "Micro-assembler field overflow\n"); KERN_WARNING "Micro-assembler field overflow\n");
...@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg) ...@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg)
return arg & 0xffff; return arg & 0xffff;
} }
static inline __uasminit u32 build_uimm(u32 arg) static inline u32 build_uimm(u32 arg)
{ {
WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & IMM_MASK; return arg & IMM_MASK;
} }
static inline __uasminit u32 build_scimm(u32 arg) static inline u32 build_scimm(u32 arg)
{ {
WARN(arg & ~SCIMM_MASK, WARN(arg & ~SCIMM_MASK,
KERN_WARNING "Micro-assembler field overflow\n"); KERN_WARNING "Micro-assembler field overflow\n");
...@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg) ...@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg)
return (arg & SCIMM_MASK) << SCIMM_SH; return (arg & SCIMM_MASK) << SCIMM_SH;
} }
static inline __uasminit u32 build_func(u32 arg) static inline u32 build_func(u32 arg)
{ {
WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & FUNC_MASK; return arg & FUNC_MASK;
} }
static inline __uasminit u32 build_set(u32 arg) static inline u32 build_set(u32 arg)
{ {
WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
return arg & SET_MASK; return arg & SET_MASK;
} }
static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); static void build_insn(u32 **buf, enum opcode opc, ...);
#define I_u1u2u3(op) \ #define I_u1u2u3(op) \
Ip_u1u2u3(op) \ Ip_u1u2u3(op) \
...@@ -286,7 +286,7 @@ I_u3u1u2(_ldx) ...@@ -286,7 +286,7 @@ I_u3u1u2(_ldx)
#ifdef CONFIG_CPU_CAVIUM_OCTEON #ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h> #include <asm/octeon/octeon.h>
void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
unsigned int c) unsigned int c)
{ {
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
...@@ -304,7 +304,7 @@ I_u2s3u1(_pref) ...@@ -304,7 +304,7 @@ I_u2s3u1(_pref)
#endif #endif
/* Handle labels. */ /* Handle labels. */
void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
{ {
(*lab)->addr = addr; (*lab)->addr = addr;
(*lab)->lab = lid; (*lab)->lab = lid;
...@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in ...@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) int ISAFUNC(uasm_in_compat_space_p)(long addr)
{ {
/* Is this address in 32bit compat space? */ /* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
...@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) ...@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
static int __uasminit uasm_rel_highest(long val) static int uasm_rel_highest(long val)
{ {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
...@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val) ...@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val)
#endif #endif
} }
static int __uasminit uasm_rel_higher(long val) static int uasm_rel_higher(long val)
{ {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
...@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val) ...@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val)
#endif #endif
} }
int __uasminit ISAFUNC(uasm_rel_hi)(long val) int ISAFUNC(uasm_rel_hi)(long val)
{ {
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
int __uasminit ISAFUNC(uasm_rel_lo)(long val) int ISAFUNC(uasm_rel_lo)(long val)
{ {
return ((val & 0xffff) ^ 0x8000) - 0x8000; return ((val & 0xffff) ^ 0x8000) - 0x8000;
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
{ {
if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
...@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) ...@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
} }
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
{ {
ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
if (ISAFUNC(uasm_rel_lo(addr))) { if (ISAFUNC(uasm_rel_lo(addr))) {
...@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) ...@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
/* Handle relocations. */ /* Handle relocations. */
void __uasminit void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
{ {
(*rel)->addr = addr; (*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16; (*rel)->type = R_MIPS_PC16;
...@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) ...@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
static inline void __uasminit static inline void __resolve_relocs(struct uasm_reloc *rel,
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); struct uasm_label *lab);
void __uasminit void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) struct uasm_label *lab)
{ {
struct uasm_label *l; struct uasm_label *l;
...@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) ...@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
void __uasminit void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) long off)
{ {
for (; rel->lab != UASM_LABEL_INVALID; rel++) for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end) if (rel->addr >= first && rel->addr < end)
...@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off ...@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
void __uasminit void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) long off)
{ {
for (; lab->lab != UASM_LABEL_INVALID; lab++) for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end) if (lab->addr >= first && lab->addr < end)
...@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off ...@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
void __uasminit void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *first, u32 *end, u32 *target)
u32 *end, u32 *target)
{ {
long off = (long)(target - first); long off = (long)(target - first);
...@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 * ...@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
{ {
for (; rel->lab != UASM_LABEL_INVALID; rel++) { for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr if (rel->addr == addr
...@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) ...@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
/* Convenience functions for labeled branches. */ /* Convenience functions for labeled branches. */
void __uasminit void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bltz)(p, reg, 0); ISAFUNC(uasm_i_bltz)(p, reg, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
void __uasminit void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_b)(p, 0); ISAFUNC(uasm_i_b)(p, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
void __uasminit void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqz)(p, reg, 0); ISAFUNC(uasm_i_beqz)(p, reg, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
void __uasminit void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqzl)(p, reg, 0); ISAFUNC(uasm_i_beqzl)(p, reg, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
void __uasminit void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid)
unsigned int reg2, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
void __uasminit void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bnez)(p, reg, 0); ISAFUNC(uasm_i_bnez)(p, reg, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
void __uasminit void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgezl)(p, reg, 0); ISAFUNC(uasm_i_bgezl)(p, reg, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
void __uasminit void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgez)(p, reg, 0); ISAFUNC(uasm_i_bgez)(p, reg, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
void __uasminit void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid)
unsigned int bit, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
} }
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
void __uasminit void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid)
unsigned int bit, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
......
...@@ -32,7 +32,7 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) ...@@ -32,7 +32,7 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
/* /*
* Post-config but pre-boot cleanup entry point * Post-config but pre-boot cleanup entry point
*/ */
static void __cpuinit msmtc_init_secondary(void) static void msmtc_init_secondary(void)
{ {
int myvpe; int myvpe;
...@@ -53,7 +53,7 @@ static void __cpuinit msmtc_init_secondary(void) ...@@ -53,7 +53,7 @@ static void __cpuinit msmtc_init_secondary(void)
/* /*
* Platform "CPU" startup hook * Platform "CPU" startup hook
*/ */
static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) static void msmtc_boot_secondary(int cpu, struct task_struct *idle)
{ {
smtc_boot_secondary(cpu, idle); smtc_boot_secondary(cpu, idle);
} }
...@@ -61,7 +61,7 @@ static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) ...@@ -61,7 +61,7 @@ static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
/* /*
* SMP initialization finalization entry point * SMP initialization finalization entry point
*/ */
static void __cpuinit msmtc_smp_finish(void) static void msmtc_smp_finish(void)
{ {
smtc_smp_finish(); smtc_smp_finish();
} }
......
...@@ -150,7 +150,7 @@ static void __init plat_perf_setup(void) ...@@ -150,7 +150,7 @@ static void __init plat_perf_setup(void)
} }
} }
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
#ifdef MSC01E_INT_BASE #ifdef MSC01E_INT_BASE
if (cpu_has_veic) { if (cpu_has_veic) {
......
...@@ -91,7 +91,7 @@ static void __init plat_perf_setup(void) ...@@ -91,7 +91,7 @@ static void __init plat_perf_setup(void)
} }
} }
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
if (cpu_has_vint) if (cpu_has_vint)
set_vi_handler(cp0_compare_irq, mips_timer_dispatch); set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
......
...@@ -116,7 +116,7 @@ void nlm_early_init_secondary(int cpu) ...@@ -116,7 +116,7 @@ void nlm_early_init_secondary(int cpu)
/* /*
* Code to run on secondary just after probing the CPU * Code to run on secondary just after probing the CPU
*/ */
static void __cpuinit nlm_init_secondary(void) static void nlm_init_secondary(void)
{ {
int hwtid; int hwtid;
...@@ -252,7 +252,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask) ...@@ -252,7 +252,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
return 0; return 0;
} }
int __cpuinit nlm_wakeup_secondary_cpus(void) int nlm_wakeup_secondary_cpus(void)
{ {
u32 *reset_data; u32 *reset_data;
int threadmode; int threadmode;
......
...@@ -70,7 +70,6 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */ ...@@ -70,7 +70,6 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
nop nop
/* not reached */ /* not reached */
__CPUINIT
NESTED(nlm_boot_secondary_cpus, 16, sp) NESTED(nlm_boot_secondary_cpus, 16, sp)
/* Initialize CP0 Status */ /* Initialize CP0 Status */
move t1, zero move t1, zero
...@@ -94,7 +93,6 @@ NESTED(nlm_boot_secondary_cpus, 16, sp) ...@@ -94,7 +93,6 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
jr t0 jr t0
nop nop
END(nlm_boot_secondary_cpus) END(nlm_boot_secondary_cpus)
__FINIT
/* /*
* In case of RMIboot bootloader which is used on XLR boards, the CPUs * In case of RMIboot bootloader which is used on XLR boards, the CPUs
...@@ -102,7 +100,6 @@ END(nlm_boot_secondary_cpus) ...@@ -102,7 +100,6 @@ END(nlm_boot_secondary_cpus)
* This will get them out of the bootloader code and into linux. Needed * This will get them out of the bootloader code and into linux. Needed
* because the bootloader area will be taken and initialized by linux. * because the bootloader area will be taken and initialized by linux.
*/ */
__CPUINIT
NESTED(nlm_rmiboot_preboot, 16, sp) NESTED(nlm_rmiboot_preboot, 16, sp)
mfc0 t0, $15, 1 /* read ebase */ mfc0 t0, $15, 1 /* read ebase */
andi t0, 0x1f /* t0 has the processor_id() */ andi t0, 0x1f /* t0 has the processor_id() */
...@@ -140,4 +137,3 @@ NESTED(nlm_rmiboot_preboot, 16, sp) ...@@ -140,4 +137,3 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
b 1b b 1b
nop nop
END(nlm_rmiboot_preboot) END(nlm_rmiboot_preboot)
__FINIT
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
#error "Unknown CPU" #error "Unknown CPU"
#endif #endif
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
return IRQ_TIMER; return IRQ_TIMER;
} }
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#include <asm/netlogic/xlr/iomap.h> #include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h> #include <asm/netlogic/xlr/pic.h>
int __cpuinit xlr_wakeup_secondary_cpus(void) int xlr_wakeup_secondary_cpus(void)
{ {
struct nlm_soc_info *nodep; struct nlm_soc_info *nodep;
unsigned int i, j, boot_cpu; unsigned int i, j, boot_cpu;
......
...@@ -42,7 +42,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; ...@@ -42,7 +42,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
extern struct pci_ops bridge_pci_ops; extern struct pci_ops bridge_pci_ops;
int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{ {
unsigned long offset = NODE_OFFSET(nasid); unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc; struct bridge_controller *bc;
......
...@@ -33,7 +33,7 @@ static void msp_smtc_send_ipi_mask(const struct cpumask *mask, ...@@ -33,7 +33,7 @@ static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
/* /*
* Post-config but pre-boot cleanup entry point * Post-config but pre-boot cleanup entry point
*/ */
static void __cpuinit msp_smtc_init_secondary(void) static void msp_smtc_init_secondary(void)
{ {
int myvpe; int myvpe;
...@@ -48,8 +48,7 @@ static void __cpuinit msp_smtc_init_secondary(void) ...@@ -48,8 +48,7 @@ static void __cpuinit msp_smtc_init_secondary(void)
/* /*
* Platform "CPU" startup hook * Platform "CPU" startup hook
*/ */
static void __cpuinit msp_smtc_boot_secondary(int cpu, static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle)
struct task_struct *idle)
{ {
smtc_boot_secondary(cpu, idle); smtc_boot_secondary(cpu, idle);
} }
...@@ -57,7 +56,7 @@ static void __cpuinit msp_smtc_boot_secondary(int cpu, ...@@ -57,7 +56,7 @@ static void __cpuinit msp_smtc_boot_secondary(int cpu,
/* /*
* SMP initialization finalization entry point * SMP initialization finalization entry point
*/ */
static void __cpuinit msp_smtc_smp_finish(void) static void msp_smtc_smp_finish(void)
{ {
smtc_smp_finish(); smtc_smp_finish();
} }
......
...@@ -88,7 +88,7 @@ void __init plat_time_init(void) ...@@ -88,7 +88,7 @@ void __init plat_time_init(void)
mips_hpt_frequency = cpu_rate/2; mips_hpt_frequency = cpu_rate/2;
} }
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
/* MIPS_MT modes may want timer for second VPE */ /* MIPS_MT modes may want timer for second VPE */
if ((get_current_vpe()) && !tim_installed) { if ((get_current_vpe()) && !tim_installed) {
......
...@@ -281,7 +281,7 @@ void __init arch_init_irq(void) ...@@ -281,7 +281,7 @@ void __init arch_init_irq(void)
write_c0_status(read_c0_status() | IE_IRQ2); write_c0_status(read_c0_status() | IE_IRQ2);
} }
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
if (cpu_has_vint) if (cpu_has_vint)
set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch); set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch);
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "powertv-clock.h" #include "powertv-clock.h"
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
return irq_mips_timer; return irq_mips_timer;
} }
......
...@@ -73,7 +73,7 @@ static struct irq_chip ralink_intc_irq_chip = { ...@@ -73,7 +73,7 @@ static struct irq_chip ralink_intc_irq_chip = {
.irq_mask_ack = ralink_intc_irq_mask, .irq_mask_ack = ralink_intc_irq_mask,
}; };
unsigned int __cpuinit get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
return CP0_LEGACY_COMPARE_IRQ; return CP0_LEGACY_COMPARE_IRQ;
} }
......
...@@ -54,7 +54,7 @@ extern void pcibr_setup(cnodeid_t); ...@@ -54,7 +54,7 @@ extern void pcibr_setup(cnodeid_t);
extern void xtalk_probe_node(cnodeid_t nid); extern void xtalk_probe_node(cnodeid_t nid);
static void __cpuinit per_hub_init(cnodeid_t cnode) static void per_hub_init(cnodeid_t cnode)
{ {
struct hub_data *hub = hub_data(cnode); struct hub_data *hub = hub_data(cnode);
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
...@@ -110,7 +110,7 @@ static void __cpuinit per_hub_init(cnodeid_t cnode) ...@@ -110,7 +110,7 @@ static void __cpuinit per_hub_init(cnodeid_t cnode)
} }
} }
void __cpuinit per_cpu_init(void) void per_cpu_init(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
int slice = LOCAL_HUB_L(PI_CPU_NUM); int slice = LOCAL_HUB_L(PI_CPU_NUM);
......
...@@ -173,12 +173,12 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action) ...@@ -173,12 +173,12 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
ip27_send_ipi_single(i, action); ip27_send_ipi_single(i, action);
} }
static void __cpuinit ip27_init_secondary(void) static void ip27_init_secondary(void)
{ {
per_cpu_init(); per_cpu_init();
} }
static void __cpuinit ip27_smp_finish(void) static void ip27_smp_finish(void)
{ {
extern void hub_rt_clock_event_init(void); extern void hub_rt_clock_event_init(void);
...@@ -195,7 +195,7 @@ static void __init ip27_cpus_done(void) ...@@ -195,7 +195,7 @@ static void __init ip27_cpus_done(void)
* set sp to the kernel stack of the newly created idle process, gp to the proc * set sp to the kernel stack of the newly created idle process, gp to the proc
* struct so that current_thread_info() will work. * struct so that current_thread_info() will work.
*/ */
static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle) static void ip27_boot_secondary(int cpu, struct task_struct *idle)
{ {
unsigned long gp = (unsigned long)task_thread_info(idle); unsigned long gp = (unsigned long)task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle); unsigned long sp = __KSTK_TOS(idle);
......
...@@ -106,7 +106,7 @@ struct irqaction hub_rt_irqaction = { ...@@ -106,7 +106,7 @@ struct irqaction hub_rt_irqaction = {
#define NSEC_PER_CYCLE 800 #define NSEC_PER_CYCLE 800
#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE) #define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
void __cpuinit hub_rt_clock_event_init(void) void hub_rt_clock_event_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
...@@ -173,7 +173,7 @@ void __init plat_time_init(void) ...@@ -173,7 +173,7 @@ void __init plat_time_init(void)
hub_rt_clock_event_init(); hub_rt_clock_event_init();
} }
void __cpuinit cpu_time_init(void) void cpu_time_init(void)
{ {
lboard_t *board; lboard_t *board;
klcpu_t *cpu; klcpu_t *cpu;
...@@ -194,7 +194,7 @@ void __cpuinit cpu_time_init(void) ...@@ -194,7 +194,7 @@ void __cpuinit cpu_time_init(void)
set_c0_status(SRB_TIMOCLK); set_c0_status(SRB_TIMOCLK);
} }
void __cpuinit hub_rtc_init(cnodeid_t cnode) void hub_rtc_init(cnodeid_t cnode)
{ {
/* /*
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
extern int bridge_probe(nasid_t nasid, int widget, int masterwid); extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid) static int probe_one_port(nasid_t nasid, int widget, int masterwid)
{ {
widgetreg_t widget_id; widgetreg_t widget_id;
xwidget_part_num_t partnum; xwidget_part_num_t partnum;
...@@ -47,7 +47,7 @@ static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid) ...@@ -47,7 +47,7 @@ static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
return 0; return 0;
} }
static int __cpuinit xbow_probe(nasid_t nasid) static int xbow_probe(nasid_t nasid)
{ {
lboard_t *brd; lboard_t *brd;
klxbow_t *xbow_p; klxbow_t *xbow_p;
...@@ -100,7 +100,7 @@ static int __cpuinit xbow_probe(nasid_t nasid) ...@@ -100,7 +100,7 @@ static int __cpuinit xbow_probe(nasid_t nasid)
return 0; return 0;
} }
void __cpuinit xtalk_probe_node(cnodeid_t nid) void xtalk_probe_node(cnodeid_t nid)
{ {
volatile u64 hubreg; volatile u64 hubreg;
nasid_t nasid; nasid_t nasid;
......
...@@ -60,7 +60,7 @@ static void *mailbox_0_regs[] = { ...@@ -60,7 +60,7 @@ static void *mailbox_0_regs[] = {
/* /*
* SMP init and finish on secondary CPUs * SMP init and finish on secondary CPUs
*/ */
void __cpuinit bcm1480_smp_init(void) void bcm1480_smp_init(void)
{ {
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0; STATUSF_IP1 | STATUSF_IP0;
...@@ -95,7 +95,7 @@ static void bcm1480_send_ipi_mask(const struct cpumask *mask, ...@@ -95,7 +95,7 @@ static void bcm1480_send_ipi_mask(const struct cpumask *mask,
/* /*
* Code to run on secondary just after probing the CPU * Code to run on secondary just after probing the CPU
*/ */
static void __cpuinit bcm1480_init_secondary(void) static void bcm1480_init_secondary(void)
{ {
extern void bcm1480_smp_init(void); extern void bcm1480_smp_init(void);
...@@ -106,7 +106,7 @@ static void __cpuinit bcm1480_init_secondary(void) ...@@ -106,7 +106,7 @@ static void __cpuinit bcm1480_init_secondary(void)
* Do any tidying up before marking online and running the idle * Do any tidying up before marking online and running the idle
* loop * loop
*/ */
static void __cpuinit bcm1480_smp_finish(void) static void bcm1480_smp_finish(void)
{ {
extern void sb1480_clockevent_init(void); extern void sb1480_clockevent_init(void);
...@@ -125,7 +125,7 @@ static void bcm1480_cpus_done(void) ...@@ -125,7 +125,7 @@ static void bcm1480_cpus_done(void)
* Setup the PC, SP, and GP of a secondary processor and start it * Setup the PC, SP, and GP of a secondary processor and start it
* running! * running!
*/ */
static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) static void bcm1480_boot_secondary(int cpu, struct task_struct *idle)
{ {
int retval; int retval;
......
...@@ -48,7 +48,7 @@ static void *mailbox_regs[] = { ...@@ -48,7 +48,7 @@ static void *mailbox_regs[] = {
/* /*
* SMP init and finish on secondary CPUs * SMP init and finish on secondary CPUs
*/ */
void __cpuinit sb1250_smp_init(void) void sb1250_smp_init(void)
{ {
unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
STATUSF_IP1 | STATUSF_IP0; STATUSF_IP1 | STATUSF_IP0;
...@@ -83,7 +83,7 @@ static inline void sb1250_send_ipi_mask(const struct cpumask *mask, ...@@ -83,7 +83,7 @@ static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
/* /*
* Code to run on secondary just after probing the CPU * Code to run on secondary just after probing the CPU
*/ */
static void __cpuinit sb1250_init_secondary(void) static void sb1250_init_secondary(void)
{ {
extern void sb1250_smp_init(void); extern void sb1250_smp_init(void);
...@@ -94,7 +94,7 @@ static void __cpuinit sb1250_init_secondary(void) ...@@ -94,7 +94,7 @@ static void __cpuinit sb1250_init_secondary(void)
* Do any tidying up before marking online and running the idle * Do any tidying up before marking online and running the idle
* loop * loop
*/ */
static void __cpuinit sb1250_smp_finish(void) static void sb1250_smp_finish(void)
{ {
extern void sb1250_clockevent_init(void); extern void sb1250_clockevent_init(void);
...@@ -113,7 +113,7 @@ static void sb1250_cpus_done(void) ...@@ -113,7 +113,7 @@ static void sb1250_cpus_done(void)
* Setup the PC, SP, and GP of a secondary processor and start it * Setup the PC, SP, and GP of a secondary processor and start it
* running! * running!
*/ */
static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) static void sb1250_boot_secondary(int cpu, struct task_struct *idle)
{ {
int retval; int retval;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment