Commit ea9af694 authored by Paul Mundt

sh: Local TLB flushing variants for SMP prep.

Rename the existing flush routines to local_ variants for use by
the IPI-backed global flush routines on SMP.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 11c19656
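For orientation, the sketch below shows how an IPI-backed global wrapper could later sit on top of one of the local_ variants introduced here. It is illustrative only: this commit does not add the SMP wrappers, and the struct flush_tlb_data / flush_tlb_page_ipi names and the smp_call_function() call are assumptions of the sketch, not code from this change.

/* Sketch only -- not part of this commit. */
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr;
};

/* Runs on the other CPUs in IPI context. */
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct flush_tlb_data fd = { .vma = vma, .addr = page };

	preempt_disable();
	/* Ask the other CPUs to flush, then flush the local TLB entry. */
	smp_call_function(flush_tlb_page_ipi, &fd, 1, 1);
	local_flush_tlb_page(vma, page);
	preempt_enable();
}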
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(__flush_purge_region);
 EXPORT_SYMBOL(clear_user_page);
 #endif
-EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(__down_trylock);
 #ifdef CONFIG_SMP
...
@@ -106,7 +106,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
-	__flush_tlb_page(get_asid(), addr);
+	flush_tlb_one(get_asid(), addr);
 }
 /*
...
@@ -39,7 +39,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
@@ -74,7 +74,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-		__flush_tlb_page(get_asid(), p3_addr);
+		flush_tlb_one(get_asid(), p3_addr);
 		local_irq_restore(flags);
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
...
@@ -14,7 +14,7 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	unsigned int cpu = smp_processor_id();
@@ -31,15 +31,15 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			saved_asid = get_asid();
 			set_asid(asid);
 		}
-		__flush_tlb_page(asid, page);
+		flush_tlb_one(asid, page);
 		if (saved_asid != MMU_NO_ASID)
 			set_asid(saved_asid);
 		local_irq_restore(flags);
 	}
 }
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned int cpu = smp_processor_id();
@@ -67,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				set_asid(asid);
 			}
 			while (start < end) {
-				__flush_tlb_page(asid, start);
+				flush_tlb_one(asid, start);
 				start += PAGE_SIZE;
 			}
 			if (saved_asid != MMU_NO_ASID)
@@ -77,7 +77,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
@@ -86,7 +86,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
+		local_flush_tlb_all();
 	} else {
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
@@ -97,7 +97,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		end &= PAGE_MASK;
 		set_asid(asid);
 		while (start < end) {
-			__flush_tlb_page(asid, start);
+			flush_tlb_one(asid, start);
 			start += PAGE_SIZE;
 		}
 		set_asid(saved_asid);
@@ -105,7 +105,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_restore(flags);
 }
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned int cpu = smp_processor_id();
@@ -122,7 +122,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	}
 }
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	unsigned long flags, status;
...
@@ -13,39 +13,33 @@
 /*
  * Nothing too terribly exciting here ..
  */
-void flush_tlb(void)
-{
-	BUG();
-}
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	BUG();
 }
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	BUG();
 }
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	BUG();
 }
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	BUG();
 }
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	BUG();
 }
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	BUG();
 }
@@ -55,4 +49,3 @@ void update_mmu_cache(struct vm_area_struct * vma,
 {
 	BUG();
 }
@@ -12,7 +12,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
 	int i, ways = MMU_NTLB_WAYS;
...
@@ -12,7 +12,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long addr, data;
...
@@ -4,7 +4,6 @@
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
@@ -12,20 +11,45 @@
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
-extern void flush_tlb(void);
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+				  unsigned long start,
+				  unsigned long end);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+				 unsigned long page);
+extern void local_flush_tlb_kernel_range(unsigned long start,
+					 unsigned long end);
+extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
+
+#ifdef CONFIG_SMP
+
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern void __flush_tlb_page(unsigned long asid, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_one(unsigned long asid, unsigned long page);
+
+#else
+
+#define flush_tlb_all()			local_flush_tlb_all()
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
+#define flush_tlb_one(asid, page)	local_flush_tlb_one(asid, page)
+
+#define flush_tlb_range(vma, start, end)	\
+	local_flush_tlb_range(vma, start, end)
+
+#define flush_tlb_kernel_range(start, end)	\
+	local_flush_tlb_kernel_range(start, end)
+
+#endif /* CONFIG_SMP */
+
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
-{ /* Nothing to do */
+{
+	/* Nothing to do */
 }
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #endif /* __ASM_SH_TLBFLUSH_H */
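As a usage note (hypothetical call site, mirroring the set_pte_phys() hunk above): on a UP build the renamed entry points resolve straight back to the local variants through the #defines, so callers change only in spelling, from __flush_tlb_page() to flush_tlb_one().

	/* Hypothetical caller; PTE setup elided. */
	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	/* UP: expands to local_flush_tlb_one(); SMP: the extern declared above. */
	flush_tlb_one(get_asid(), addr);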