Commit 0aec9814 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: Make a number of segment table functions static.

From: Anton Blanchard <anton@samba.org>

Make a number of segment table functions static.
Clean up one duplicated slbia.
parent e2caa640
...@@ -18,11 +18,10 @@ ...@@ -18,11 +18,10 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/paca.h> #include <asm/paca.h>
#include <asm/naca.h> #include <asm/naca.h>
#include <asm/pmc.h>
#include <asm/cputable.h> #include <asm/cputable.h>
int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid); static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
void make_slbe(unsigned long esid, unsigned long vsid, int large, static void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment); int kernel_segment);
/* /*
...@@ -69,7 +68,7 @@ DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]); ...@@ -69,7 +68,7 @@ DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
/* /*
* Create a segment table entry for the given esid/vsid pair. * Create a segment table entry for the given esid/vsid pair.
*/ */
int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid) static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{ {
unsigned long entry, group, old_esid, castout_entry, i; unsigned long entry, group, old_esid, castout_entry, i;
unsigned int global_entry; unsigned int global_entry;
...@@ -88,7 +87,7 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid) ...@@ -88,7 +87,7 @@ int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
ste->dw0.dw0.kp = 1; ste->dw0.dw0.kp = 1;
asm volatile("eieio":::"memory"); asm volatile("eieio":::"memory");
ste->dw0.dw0.v = 1; ste->dw0.dw0.v = 1;
return(global_entry | entry); return (global_entry | entry);
} }
} }
/* Now search the secondary group. */ /* Now search the secondary group. */
...@@ -254,8 +253,6 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm) ...@@ -254,8 +253,6 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
ste = stab + __get_cpu_var(stab_cache[i]); ste = stab + __get_cpu_var(stab_cache[i]);
ste->dw0.dw0.v = 0; ste->dw0.dw0.v = 0;
} }
asm volatile("sync; slbia; sync":::"memory");
} else { } else {
unsigned long entry; unsigned long entry;
...@@ -273,9 +270,9 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm) ...@@ -273,9 +270,9 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
ste->dw0.dw0.v = 0; ste->dw0.dw0.v = 0;
} }
} }
}
asm volatile("sync; slbia; sync":::"memory"); asm volatile("sync; slbia; sync":::"memory");
}
*offset = 0; *offset = 0;
...@@ -292,7 +289,7 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm) ...@@ -292,7 +289,7 @@ void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
* NOTE: A context syncronising instruction is required before and after * NOTE: A context syncronising instruction is required before and after
* this, in the common case we use exception entry and rfid. * this, in the common case we use exception entry and rfid.
*/ */
void make_slbe(unsigned long esid, unsigned long vsid, int large, static void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment) int kernel_segment)
{ {
unsigned long entry, castout_entry; unsigned long entry, castout_entry;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.