Commit 8c37bb3a authored by Paul Gortmaker

clocksource+irqchip: delete __cpuinit usage from all related files

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  The fix in commit
5e427ec2 ("x86: Fix bit corruption at CPU resume time") is a good
example of the nasty type of bug that can be created by improper
use of the various __init prefixes.
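
For background, the annotations behaved roughly as in the simplified
sketch below.  This is an illustration of the effect only, not the
exact include/linux/init.h definitions (the real macros also carried
attributes such as __cold, and the section handling lived in the
architecture linker scripts):

	/*
	 * Simplified illustration: functions and data tagged this way were
	 * collected into .cpuinit sections, which a kernel built without
	 * CPU hotplug could discard after boot to save a little RAM.
	 */
	#ifdef CONFIG_HOTPLUG_CPU
	#define __cpuinit	/* CPUs may come up later; keep code resident */
	#define __cpuinitdata
	#else
	#define __cpuinit	__attribute__((__section__(".cpuinit.text")))
	#define __cpuinitdata	__attribute__((__section__(".cpuinit.data")))
	#endif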

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

This removes all the drivers/clocksource and drivers/irqchip uses of
the __cpuinit macros from all C files.

[1] https://lkml.org/lkml/2013/5/20/589
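
The conversion itself is mechanical, as the diff below shows: delete
__cpuinit from the function definitions and __cpuinitdata from the
static data that references them, leaving everything else untouched.
A minimal sketch of the kind of definition being converted, using a
hypothetical driver_cpu_notify()/driver_cpu_nb pair rather than code
from this series:

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* Hypothetical example of the pattern this commit un-annotates. */
	static int __cpuinit driver_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
	{
		/* react only to a CPU that is coming online */
		if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
			/* per-cpu setup would go here */;
		return NOTIFY_OK;
	}

	static struct notifier_block driver_cpu_nb __cpuinitdata = {
		.notifier_call = driver_cpu_notify,
	};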

Cc: John Stultz <john.stultz@linaro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
parent 148f9bb8
@@ -123,7 +123,7 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
 	return 0;
 }
 
-static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
+static int arch_timer_setup(struct clock_event_device *clk)
 {
 	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 	clk->name = "arch_sys_timer";
@@ -221,7 +221,7 @@ struct timecounter *arch_timer_get_timecounter(void)
 	return &timecounter;
 }
 
-static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
+static void arch_timer_stop(struct clock_event_device *clk)
 {
 	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 		 clk->irq, smp_processor_id());
@@ -237,7 +237,7 @@ static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
 	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 }
 
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+static int arch_timer_cpu_notify(struct notifier_block *self,
 					   unsigned long action, void *hcpu)
 {
 	/*
@@ -256,7 +256,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
+static struct notifier_block arch_timer_cpu_nb = {
 	.notifier_call = arch_timer_cpu_notify,
 };
...
@@ -164,7 +164,7 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int __cpuinit gt_clockevents_init(struct clock_event_device *clk)
+static int gt_clockevents_init(struct clock_event_device *clk)
 {
 	int cpu = smp_processor_id();
@@ -221,8 +221,8 @@ static void __init gt_clocksource_init(void)
 	clocksource_register_hz(&gt_clocksource, gt_clk_rate);
 }
 
-static int __cpuinit gt_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *hcpu)
+static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
+			 void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_STARTING:
@@ -235,7 +235,7 @@ static int __cpuinit gt_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block gt_cpu_nb __cpuinitdata = {
+static struct notifier_block gt_cpu_nb = {
 	.notifier_call = gt_cpu_notify,
 };
...
@@ -25,7 +25,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
 	 */
 }
 
-static void __cpuinit dummy_timer_setup(void)
+static void dummy_timer_setup(void)
 {
 	int cpu = smp_processor_id();
 	struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt);
@@ -41,7 +41,7 @@ static void __cpuinit dummy_timer_setup(void)
 	clockevents_register_device(evt);
 }
 
-static int __cpuinit dummy_timer_cpu_notify(struct notifier_block *self,
+static int dummy_timer_cpu_notify(struct notifier_block *self,
 				      unsigned long action, void *hcpu)
 {
 	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
@@ -50,7 +50,7 @@ static int __cpuinit dummy_timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block dummy_timer_cpu_nb __cpuinitdata = {
+static struct notifier_block dummy_timer_cpu_nb = {
 	.notifier_call = dummy_timer_cpu_notify,
 };
...
@@ -400,7 +400,7 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
+static int exynos4_local_timer_setup(struct clock_event_device *evt)
 {
 	struct mct_clock_event_device *mevt;
 	unsigned int cpu = smp_processor_id();
@@ -448,7 +448,7 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt)
 	disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
 }
 
-static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
+static struct local_timer_ops exynos4_mct_tick_ops = {
 	.setup	= exynos4_local_timer_setup,
 	.stop	= exynos4_local_timer_stop,
 };
...
@@ -109,7 +109,7 @@ unsigned long long sched_clock(void)
 	return ticks << HARDWARE_TO_NS_SHIFT;
 }
 
-static void __cpuinit arch_timer_setup(unsigned int cpu)
+static void arch_timer_setup(unsigned int cpu)
 {
 	unsigned int txdivtime;
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -154,7 +154,7 @@ static void __cpuinit arch_timer_setup(unsigned int cpu)
 	}
 }
 
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+static int arch_timer_cpu_notify(struct notifier_block *self,
 					   unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
+static struct notifier_block arch_timer_cpu_nb = {
 	.notifier_call = arch_timer_cpu_notify,
 };
...
@@ -167,7 +167,7 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
 /*
  * Setup the local clock events for a CPU.
  */
-static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt)
+static int armada_370_xp_timer_setup(struct clock_event_device *evt)
 {
 	u32 u;
 	int cpu = smp_processor_id();
@@ -205,7 +205,7 @@ static void armada_370_xp_timer_stop(struct clock_event_device *evt)
 	disable_percpu_irq(evt->irq);
 }
 
-static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = {
+static struct local_timer_ops armada_370_xp_local_timer_ops = {
 	.setup	= armada_370_xp_timer_setup,
 	.stop	= armada_370_xp_timer_stop,
 };
...
@@ -184,7 +184,7 @@ static struct irqaction sirfsoc_timer1_irq = {
 	.handler = sirfsoc_timer_interrupt,
 };
 
-static int __cpuinit sirfsoc_local_timer_setup(struct clock_event_device *ce)
+static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
 {
 	/* Use existing clock_event for cpu 0 */
 	if (!smp_processor_id())
@@ -216,7 +216,7 @@ static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
 	remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
 }
 
-static struct local_timer_ops sirfsoc_local_timer_ops __cpuinitdata = {
+static struct local_timer_ops sirfsoc_local_timer_ops = {
 	.setup	= sirfsoc_local_timer_setup,
 	.stop	= sirfsoc_local_timer_stop,
 };
...
@@ -414,7 +414,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	writel_relaxed(1, base + GIC_DIST_CTRL);
 }
 
-static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
+static void gic_cpu_init(struct gic_chip_data *gic)
 {
 	void __iomem *dist_base = gic_data_dist_base(gic);
 	void __iomem *base = gic_data_cpu_base(gic);
@@ -702,8 +702,8 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
 }
 
 #ifdef CONFIG_SMP
-static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
+			      void *hcpu)
 {
 	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		gic_cpu_init(&gic_data[0]);
@@ -714,7 +714,7 @@ static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
  * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
  * priority because the GIC needs to be up before the ARM generic timers.
  */
-static struct notifier_block __cpuinitdata gic_cpu_notifier = {
+static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
 };
...