Commit 16da2f93 authored by Thomas Gleixner, committed by Ingo Molnar

x86: smp_64.c: remove unused exports and cleanup while at it

The exports are not used anywhere, and there was never a reason to
introduce them in the first place.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 081e10b9
@@ -176,9 +176,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 	f = &per_cpu(flush_state, sender);
 
-	/* Could avoid this lock when
-	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-	   probably not worth checking this for a cache-hot lock. */
+	/*
+	 * Could avoid this lock when
+	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+	 * probably not worth checking this for a cache-hot lock.
+	 */
 	spin_lock(&f->tlbstate_lock);
 
 	f->flush_mm = mm;
@@ -202,12 +204,12 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 int __cpuinit init_smp_flush(void)
 {
 	int i;
+
 	for_each_cpu_mask(i, cpu_possible_map) {
 		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
 	}
 	return 0;
 }
-
 core_initcall(init_smp_flush);
 
 void flush_tlb_current_task(void)
@@ -224,7 +226,6 @@ void flush_tlb_current_task(void)
 	flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_current_task);
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
@@ -245,7 +246,6 @@ void flush_tlb_mm (struct mm_struct * mm)
 
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
@@ -268,7 +268,6 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_page);
 
 static void do_flush_tlb_all(void* info)
 {
@@ -325,9 +324,7 @@ void unlock_ipi_call_lock(void)
  * this function sends a 'generic call function' IPI to all other CPU
  * of the system defined in the mask.
 */
-static int
-__smp_call_function_mask(cpumask_t mask,
-			 void (*func)(void *), void *info,
-			 int wait)
+static int __smp_call_function_mask(cpumask_t mask,
+				    void (*func)(void *), void *info,
+				    int wait)
 {
@@ -420,8 +417,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
 	int nonatomic, int wait)
 {
 	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
+	int ret, me = get_cpu();
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
...
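
Note that dropping EXPORT_SYMBOL() only affects loadable modules; built-in
callers are linked against these functions directly and are unaffected. As a
hedged illustration of what the removal means (this module and its names are
hypothetical, not anything in the tree), a module like the following would
have loaded before this commit, but would now be rejected by the module
loader with an unknown-symbol error:

/*
 * Hypothetical out-of-tree module, for illustration only.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>

static int __init tlbdemo_init(void)
{
	/*
	 * flush_tlb_mm() lost its EXPORT_SYMBOL() in this commit, so
	 * the module loader can no longer resolve this reference;
	 * insmod would now fail with "Unknown symbol flush_tlb_mm".
	 */
	flush_tlb_mm(current->mm);
	return 0;
}

static void __exit tlbdemo_exit(void)
{
}

module_init(tlbdemo_init);
module_exit(tlbdemo_exit);
MODULE_LICENSE("GPL");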