Commit 722a9f92 authored by Andi Kleen, committed by H. Peter Anvin

asmlinkage: Add explicit __visible to drivers/*, lib/*, kernel/*

As requested by Linus, add explicit __visible to the asmlinkage users.
This marks the functions as visible to the assembler.

Tree sweep for the rest of the tree.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1398984278-29319-4-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 2605fc21
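For context on the pattern this sweep applies (not part of the commit itself): __visible is modeled on GCC's externally_visible attribute, which tells the optimizer that a symbol is referenced from outside what it can see, here from assembly, so it must not be dropped or localized under -fwhole-program/LTO, while asmlinkage selects the calling convention that the assembly callers expect. The sketch below is a minimal, self-contained illustration; the simplified macro definitions and the function name example_entry_from_asm are assumptions for demonstration, not the kernel's actual <linux/linkage.h> definitions.

/*
 * Minimal sketch of the asmlinkage + __visible pattern.
 * These macros are illustrative stand-ins for the kernel's real headers.
 */
#if defined(__GNUC__)
#define __visible	__attribute__((externally_visible))	/* keep the symbol even if no C caller is visible */
#else
#define __visible
#endif

#if defined(__i386__)
#define asmlinkage	__attribute__((regparm(0)))	/* x86-32: pass all arguments on the stack */
#else
#define asmlinkage
#endif

/*
 * Hypothetical C entry point whose only callers live in assembly.
 * Without __visible, whole-program optimization could discard it or
 * change how it is emitted; the attribute pins the external definition.
 */
asmlinkage __visible void example_entry_from_asm(void)
{
	/* real work would go here */
}

Every hunk below makes the same one-line change: each asmlinkage function definition gains __visible.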
@@ -37,7 +37,7 @@ __visible struct {
  * kernel begins at offset 3GB...
  */
 
-asmlinkage void pnp_bios_callfunc(void);
+asmlinkage __visible void pnp_bios_callfunc(void);
 
 __asm__(".text \n"
 	__ALIGN_STR "\n"
...
@@ -476,7 +476,7 @@ static void __init mm_init(void)
 	vmalloc_init();
 }
 
-asmlinkage void __init start_kernel(void)
+asmlinkage __visible void __init start_kernel(void)
 {
 	char * command_line;
 	extern const struct kernel_param __start___param[], __stop___param[];
...
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 {
 	enum ctx_state prev_ctx;
...
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
...
@@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 	return -ENOMEM;
 }
 
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
...
@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
  *
  * See the vsnprintf() documentation for format string extensions over C99.
  */
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
 {
 	va_list args;
 	int r;
@@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
 	}
 }
 
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
 {
 	va_list ap;
...
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2741,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
...
@@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;
@@ -299,7 +299,7 @@ asmlinkage void __do_softirq(void)
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
 {
 	__u32 pending;
 	unsigned long flags;
...
@@ -23,7 +23,7 @@ static void __dump_stack(void)
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
 	int was_locked;
 	int old;
@@ -55,7 +55,7 @@ asmlinkage void dump_stack(void)
 	preempt_enable();
 }
 #else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
 {
 	__dump_stack();
 }
...