Commit 95519b26 authored by Anton Blanchard

Replace flush_tlb_all with flush_tlb_kernel_range, which allows optimisations on some architectures.

parent 513bf064
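The idea behind the new interface: flush_tlb_all() discards every TLB entry, while flush_tlb_kernel_range(start, end) tells the architecture exactly which kernel pages went away, so an architecture with a targeted invalidate instruction can do far less work. A minimal sketch of what such an implementation could look like -- the per-page primitive local_flush_tlb_page() here is hypothetical, for illustration only, and is not something this commit adds:

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* Invalidate only the pages in [start, end) instead of
	 * wiping the whole TLB the way flush_tlb_all() does.
	 * local_flush_tlb_page() stands in for whatever per-page
	 * invalidate primitive the architecture provides.
	 */
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		local_flush_tlb_page(addr);
}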
arch/ppc64/mm/init.c
@@ -261,15 +261,6 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	}
 }
 
-void
-flush_tlb_all(void)
-{
-	/* Implemented to just flush the vmalloc area.
-	 * vmalloc is the only user of flush_tlb_all.
-	 */
-	__flush_tlb_range(NULL, VMALLOC_START, VMALLOC_END);
-}
-
 void
 flush_tlb_mm(struct mm_struct *mm)
 {
...
include/asm-alpha/tlbflush.h
@@ -152,4 +152,6 @@ extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
 #endif /* CONFIG_SMP */
 
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
 #endif /* _ALPHA_TLBFLUSH_H */
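Alpha (and i386 below) gains nothing yet: the new hook is simply defined back to flush_tlb_all(), so behaviour there is unchanged. The win comes on architectures that implement a real range flush, which in this commit means ppc64.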
include/asm-i386/tlbflush.h
@@ -67,6 +67,7 @@ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
  * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
@@ -132,6 +133,8 @@ extern struct tlb_state cpu_tlbstate[NR_CPUS];
 #endif
 
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
...
include/asm-ppc64/tlbflush.h
@@ -8,14 +8,13 @@
 /*
  * TLB flushing:
  *
- *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
-extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void __flush_tlb_range(struct mm_struct *mm,
@@ -23,6 +22,9 @@ extern void __flush_tlb_range(struct mm_struct *mm,
 #define flush_tlb_range(vma, start, end) \
 	__flush_tlb_range(vma->vm_mm, start, end)
 
+#define flush_tlb_kernel_range(start, end) \
+	__flush_tlb_range(&init_mm, (start), (end))
+
 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
...
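This hunk is where the optimisation promised in the commit message lands. The removed ppc64 flush_tlb_all() (see arch/ppc64/mm/init.c above) could only flush the whole vmalloc window, since vmalloc was its sole caller; with the new hook, each caller passes the exact range, which is flushed against init_mm, the kernel's own address space.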
mm/highmem.c
@@ -88,7 +88,7 @@ static void flush_all_zero_pkmaps(void)
 		page->virtual = NULL;
 	}
 
-	flush_tlb_all();
+	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
 static inline unsigned long map_new_virtual(struct page *page)
...
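PKMAP_ADDR(0) through PKMAP_ADDR(LAST_PKMAP) covers precisely the kmap window whose page table entries the surrounding loop just cleared, so only those kernel mappings need their TLB entries invalidated.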
mm/vmalloc.c
@@ -81,6 +81,7 @@ static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned lo
 void vmfree_area_pages(unsigned long address, unsigned long size)
 {
 	pgd_t * dir;
+	unsigned long start = address;
 	unsigned long end = address + size;
 
 	dir = pgd_offset_k(address);
@@ -90,7 +91,7 @@ void vmfree_area_pages(unsigned long address, unsigned long size)
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	flush_tlb_all();
+	flush_tlb_kernel_range(start, end);
 }
 
 static inline int alloc_area_pte (pte_t * pte, unsigned long address,
...
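The new start local is needed because the do/while loop uses address as its cursor, advancing it one pgd at a time; by the time the flush runs, address has moved past the freed range, so the original base must be saved for the final flush_tlb_kernel_range(start, end) to cover exactly the pages that were unmapped.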