Commit 171c2bcb authored by Linus Torvalds

Merge branch 'core-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull unified TLB flushing from Ingo Molnar:
 "This contains the generic mmu_gather feature from Peter Zijlstra,
  which is an all-arch unification of TLB flushing APIs, via the
  following (broad) steps:

   - enhance the <asm-generic/tlb.h> APIs to cover more arch details

   - convert most TLB flushing arch implementations to the generic
     <asm-generic/tlb.h> APIs.

   - remove leftovers of per-arch implementations

  After this series every single architecture makes use of the unified
  TLB flushing APIs"

* 'core-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm/resource: Use resource_overlaps() to simplify region_intersects()
  ia64/tlb: Eradicate tlb_migrate_finish() callback
  asm-generic/tlb: Remove tlb_table_flush()
  asm-generic/tlb: Remove tlb_flush_mmu_free()
  asm-generic/tlb: Remove CONFIG_HAVE_GENERIC_MMU_GATHER
  asm-generic/tlb: Remove arch_tlb*_mmu()
  s390/tlb: Convert to generic mmu_gather
  asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y
  arch/tlb: Clean up simple architectures
  um/tlb: Convert to generic mmu_gather
  sh/tlb: Convert SH to generic mmu_gather
  ia64/tlb: Convert to generic mmu_gather
  arm/tlb: Convert to generic mmu_gather
  asm-generic/tlb, arch: Invert CONFIG_HAVE_RCU_TABLE_INVALIDATE
  asm-generic/tlb, ia64: Conditionally provide tlb_migrate_finish()
  asm-generic/tlb: Provide generic tlb_flush() based on flush_tlb_mm()
  asm-generic/tlb, arch: Provide generic tlb_flush() based on flush_tlb_range()
  asm-generic/tlb, arch: Provide generic VIPT cache flush
  asm-generic/tlb, arch: Provide CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
  asm-generic/tlb: Provide a comment
parents 423ea325 f6c6010a
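
Every conversion in the diff below follows the same shape: the architecture keeps at most a handful of hooks (tlb_flush(), __pte_free_tlb(), ...) and pulls the gather machinery from <asm-generic/tlb.h>. As a rough sketch only -- "foo" is a made-up architecture and foo_flush_tlb_pages() an invented arch primitive, not anything from this series:

  /* Hypothetical arch/foo/include/asm/tlb.h after conversion. */
  #ifndef _ASM_FOO_TLB_H
  #define _ASM_FOO_TLB_H

  #define tlb_flush tlb_flush
  static inline void tlb_flush(struct mmu_gather *tlb);

  #include <asm-generic/tlb.h>

  static inline void tlb_flush(struct mmu_gather *tlb)
  {
  	if (tlb->fullmm)
  		flush_tlb_mm(tlb->mm);
  	else
  		foo_flush_tlb_pages(tlb->mm, tlb->start, tlb->end);
  }

  #endif /* _ASM_FOO_TLB_H */
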
@@ -101,16 +101,6 @@ changes occur:
	translations for software managed TLB configurations.
	The sparc64 port currently does this.
6) ``void tlb_migrate_finish(struct mm_struct *mm)``
This interface is called at the end of an explicit
process migration. This interface provides a hook
to allow a platform to update TLB or context-specific
information for the address space.
The ia64 sn2 platform is one example of a platform
that uses this interface.
Next, we have the cache flushing interfaces.  In general, when Linux
is changing an existing virtual-->physical mapping to a new value,
the sequence will be in one of the following forms::
......
@@ -383,7 +383,13 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
config HAVE_RCU_TABLE_FREE
	bool

config HAVE_RCU_TABLE_INVALIDATE
config HAVE_RCU_TABLE_NO_INVALIDATE
	bool

config HAVE_MMU_GATHER_PAGE_SIZE
	bool

config HAVE_MMU_GATHER_NO_GATHER
	bool

config ARCH_HAVE_NMI_SAFE_CMPXCHG
......
@@ -36,6 +36,7 @@ config ALPHA
	select ODD_RT_SIGACTION
	select OLD_SIGSUSPEND
	select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
	select MMU_GATHER_NO_RANGE
	help
	  The Alpha is a 64-bit general-purpose processor designed and
	  marketed by the Digital Equipment Corporation of blessed memory,
......
@@ -2,12 +2,6 @@
#ifndef _ALPHA_TLB_H
#define _ALPHA_TLB_H

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, addr)	do { } while (0)

#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#define __pte_free_tlb(tlb, pte, address)		pte_free((tlb)->mm, pte)
......
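
alpha now selects MMU_GATHER_NO_RANGE (as do several of the simpler architectures further down), so the tlb_flush() it used to spell out by hand is supplied by the generic header instead; roughly, as a sketch rather than the verbatim asm-generic/tlb.h code:

  #ifdef CONFIG_MMU_GATHER_NO_RANGE
  /* No usable range-flush primitive: any flush becomes a full-MM flush. */
  static inline void tlb_flush(struct mmu_gather *tlb)
  {
  	if (tlb->end)		/* did we actually unmap anything? */
  		flush_tlb_mm(tlb->mm);
  }
  #endif
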
@@ -9,38 +9,6 @@
#ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H
#define tlb_flush(tlb) \
do { \
if (tlb->fullmm) \
flush_tlb_mm((tlb)->mm); \
} while (0)
/*
* This pair is called at time of munmap/exit to flush cache and TLB entries
* for mappings being torn down.
* 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
* 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
*
* Note, read http://lkml.org/lkml/2004/1/15/6
*/
#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
#define tlb_start_vma(tlb, vma)
#else
#define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
#endif
#define tlb_end_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address)
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
......
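
The cache-flush half of the ARC hooks deleted above (needed for the aliasing VIPT D$) is not lost; the generic tlb_start_vma()/tlb_end_vma() now take care of it, along these lines (a sketch, assuming the post-series generic header):

  static inline void tlb_start_vma(struct mmu_gather *tlb,
  				 struct vm_area_struct *vma)
  {
  	if (tlb->fullmm)
  		return;
  	/* Generic VIPT cache flush before the mappings go away. */
  	flush_cache_range(vma, vma->vm_start, vma->vm_end);
  }

  static inline void tlb_end_vma(struct mmu_gather *tlb,
  			       struct vm_area_struct *vma)
  {
  	if (tlb->fullmm)
  		return;
  	/* Flush the accumulated range while the vma is still valid. */
  	tlb_flush_mmu_tlbonly(tlb);
  }
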
@@ -33,271 +33,42 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define MMU_GATHER_BUNDLE 8
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

#include <asm-generic/tlb.h>

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
/*
* TLB handling. This allows us to remove pages from the page
* tables, and efficiently handle the TLB issues.
*/
struct mmu_gather {
struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
unsigned int need_flush;
#endif
unsigned int fullmm;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
unsigned int max;
struct page **pages;
struct page *local[MMU_GATHER_BUNDLE];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
* This is unnecessarily complex. There's three ways the TLB shootdown
* code is used:
* 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
* tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL.
* 2. Unmapping all vmas. See exit_mmap().
* tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL. Additionally, page tables will be freed.
* 3. Unmapping argument pages. See shift_arg_pages().
* tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
* tlb->vma will be NULL.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || !tlb->vma)
flush_tlb_mm(tlb->mm);
else if (tlb->range_end > 0) {
flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
}
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
if (!tlb->fullmm) {
if (addr < tlb->range_start)
tlb->range_start = addr;
if (addr + PAGE_SIZE > tlb->range_end)
tlb->range_end = addr + PAGE_SIZE;
}
}
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
if (addr) {
tlb->pages = (void *)addr;
tlb->max = PAGE_SIZE / sizeof(struct page *);
}
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
}
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
if (tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = !(start | (end+1));
tlb->start = start;
tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
__tlb_alloc_page(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}

#ifndef CONFIG_HAVE_RCU_TABLE_FREE
#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
#endif
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force)
{
if (force) {
tlb->range_start = start;
tlb->range_end = end;
}
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
if (tlb->pages != tlb->local)
free_pages((unsigned long)tlb->pages, 0);
}
/*
* Memorize the range for the TLB flush.
*/
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
tlb_add_flush(tlb, addr);
}
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
tlb_remove_tlb_entry(tlb, ptep, address)
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
flush_cache_range(vma, vma->vm_start, vma->vm_end);
tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
}
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm)
tlb_flush(tlb);
}
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->pages[tlb->nr++] = page;
VM_WARN_ON(tlb->nr > tlb->max);
if (tlb->nr == tlb->max)
return true;
return false;
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb);
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return tlb_remove_page(tlb, page);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

static inline void tlb_flush_remove_tables(struct mm_struct *mm)
{
}

static inline void tlb_flush_remove_tables_local(void *arg)
{
}

static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifndef CONFIG_ARM_LPAE
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif

	tlb_remove_table(tlb, pte);
}

static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	struct page *page = virt_to_page(pmdp);

	tlb_remove_table(tlb, page);
#endif
}

#endif /* CONFIG_MMU */
......
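
Note that the rewritten ARM __pte_free_tlb() above no longer uses a private tlb_add_flush(); it grows the gather's pending range with the generic helper, which is essentially (sketch):

  static inline void __tlb_adjust_range(struct mmu_gather *tlb,
  				      unsigned long address,
  				      unsigned long range_size)
  {
  	tlb->start = min(tlb->start, address);
  	tlb->end = max(tlb->end, address + range_size);
  }
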
@@ -149,7 +149,6 @@ config ARM64
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RCU_TABLE_FREE
	select HAVE_RCU_TABLE_INVALIDATE
	select HAVE_RSEQ
	select HAVE_STACKPROTECTOR
	select HAVE_SYSCALL_TRACEPOINTS
......
@@ -27,6 +27,7 @@ static inline void __tlb_remove_table(void *_table)
	free_page_and_swap_cache((struct page *)_table);
}

#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>
......
@@ -20,6 +20,7 @@ config C6X
	select GENERIC_CLOCKEVENTS
	select MODULES_USE_ELF_RELA
	select ARCH_NO_COHERENT_DMA_MMAP
	select MMU_GATHER_NO_RANGE if MMU

config MMU
	def_bool n
......
@@ -2,8 +2,6 @@
#ifndef _ASM_C6X_TLB_H
#define _ASM_C6X_TLB_H

#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#endif /* _ASM_C6X_TLB_H */
@@ -2,8 +2,6 @@
#ifndef __H8300_TLB_H__
#define __H8300_TLB_H__

#define tlb_flush(tlb)	do { } while (0)

#include <asm-generic/tlb.h>

#endif
@@ -22,18 +22,6 @@
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
/*
* We don't need any special per-pte or per-vma handling...
*/
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
/*
* .. because we flush the whole mm when it fills up
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>

#endif
@@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef u8 ia64_mv_irq_to_vector (int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -79,11 +78,6 @@ machvec_noop (void)
{
}

static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}

static inline void
machvec_noop_task (struct task_struct *task)
{
@@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus)
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
extern void machvec_tlb_migrate_finish (struct mm_struct *);

# if defined (CONFIG_IA64_HP_SIM)
#  include <asm/machvec_hpsim.h>
@@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
#  define platform_send_ipi		ia64_mv.send_ipi
#  define platform_timer_interrupt	ia64_mv.timer_interrupt
#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
#  define platform_dma_init		ia64_mv.dma_init
#  define platform_dma_get_ops		ia64_mv.dma_get_ops
#  define platform_irq_to_vector	ia64_mv.irq_to_vector
@@ -167,7 +159,6 @@ struct ia64_machine_vector {
	ia64_mv_send_ipi_t *send_ipi;
	ia64_mv_timer_interrupt_t *timer_interrupt;
	ia64_mv_global_tlb_purge_t *global_tlb_purge;
	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
	ia64_mv_dma_init *dma_init;
	ia64_mv_dma_get_ops *dma_get_ops;
	ia64_mv_irq_to_vector *irq_to_vector;
@@ -206,7 +197,6 @@ struct ia64_machine_vector {
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_get_ops,			\
	platform_irq_to_vector,			\
@@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge	ia64_global_tlb_purge	/* default to architected version */
#endif
#ifndef platform_tlb_migrate_finish
# define platform_tlb_migrate_finish	machvec_noop_mm
#endif
#ifndef platform_kernel_launch_event
# define platform_kernel_launch_event	machvec_noop
#endif
......
@@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
@@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
#define platform_send_ipi		sn2_send_IPI
#define platform_timer_interrupt	sn_timer_interrupt
#define platform_global_tlb_purge	sn2_global_tlb_purge
#define platform_tlb_migrate_finish	sn_tlb_migrate_finish
#define platform_pci_fixup		sn_pci_fixup
#define platform_inb			__sn_inb
#define platform_inw			__sn_inw
......
@@ -47,263 +47,6 @@
#include <asm/tlbflush.h>
#include <asm/machvec.h>

#include <asm-generic/tlb.h>

/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
*/
#define IA64_GATHER_BUNDLE 8
struct mmu_gather {
struct mm_struct *mm;
unsigned int nr;
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
unsigned long start, end;
unsigned long start_addr;
unsigned long end_addr;
struct page **pages;
struct page *local[IA64_GATHER_BUNDLE];
};
struct ia64_tr_entry {
u64 ifa;
u64 itir;
u64 pte;
u64 rr;
}; /*Record for tr entry!*/
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
region register macros
*/
#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK 0x0000000000000001L
#define RR_VE_SHIFT 0
#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK 0x00000000000000fcL
#define RR_PS_SHIFT 2
#define RR_RID_MASK 0x00000000ffffff00L
#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
tlb->need_flush = 0;
if (tlb->fullmm) {
/*
* Tearing down the entire address space. This happens both as a result
* of exit() and execve(). The latter case necessitates the call to
* flush_tlb_mm() here.
*/
flush_tlb_mm(tlb->mm);
} else if (unlikely (end - start >= 1024*1024*1024*1024UL
|| REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
{
/*
* If we flush more than a tera-byte or across regions, we're probably
* better off just flushing the entire TLB(s). This should be very rare
* and is not worth optimizing for.
*/
flush_tlb_all();
} else {
/*
* flush_tlb_range() takes a vma instead of a mm pointer because
* some architectures want the vm_flags for ITLB/DTLB flush.
*/
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
/* flush the address range from the tlb: */
flush_tlb_range(&vma, start, end);
/* now flush the virt. page-table area mapping the address range: */
flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}
}
static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
unsigned long i;
unsigned int nr;
/* lastly, release the freed pages */
nr = tlb->nr;
tlb->nr = 0;
tlb->start_addr = ~0UL;
for (i = 0; i < nr; ++i)
free_page_and_swap_cache(tlb->pages[i]);
}
/*
* Flush the TLB for address range START to END and, if not in fast mode, release the
* freed pages that where gathered up to this point.
*/
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
if (!tlb->need_flush)
return;
ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
ia64_tlb_flush_mmu_free(tlb);
}
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
if (addr) {
tlb->pages = (void *)addr;
tlb->max = PAGE_SIZE / sizeof(void *);
}
}
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
tlb->fullmm = !(start | (end+1));
tlb->start = start;
tlb->end = end;
tlb->start_addr = ~0UL;
}
/*
* Called at the end of the shootdown operation to free up any resources that were
* collected.
*/
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force)
{
if (force)
tlb->need_flush = 1;
/*
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
* tlb->end_addr.
*/
ia64_tlb_flush_mmu(tlb, start, end);
/* keep the page table cache within bounds */
check_pgt_cache();
if (tlb->pages != tlb->local)
free_pages((unsigned long)tlb->pages, 0);
}
/*
* Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
* must be delayed until after the TLB has been flushed (see comments at the beginning of
* this file).
*/
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->need_flush = 1;
if (!tlb->nr && tlb->pages == tlb->local)
__tlb_alloc_page(tlb);
tlb->pages[tlb->nr++] = page;
VM_WARN_ON(tlb->nr > tlb->max);
if (tlb->nr == tlb->max)
return true;
return false;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
ia64_tlb_flush_mmu_free(tlb);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (__tlb_remove_page(tlb, page))
tlb_flush_mmu(tlb);
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return tlb_remove_page(tlb, page);
}
/*
* Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
* PTE, not just those pointing to (normal) physical memory.
*/
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
if (tlb->start_addr == ~0UL)
tlb->start_addr = address;
tlb->end_addr = address + PAGE_SIZE;
}
#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr) \
do { \
tlb->need_flush = 1; \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
tlb_remove_tlb_entry(tlb, ptep, address)
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size)
{
}
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#define pmd_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
__pmd_free_tlb(tlb, ptep, address); \
} while (0)
#define pud_free_tlb(tlb, pudp, address) \
do { \
tlb->need_flush = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif /* _ASM_IA64_TLB_H */
@@ -14,6 +14,31 @@
#include <asm/mmu_context.h>
#include <asm/page.h>
struct ia64_tr_entry {
u64 ifa;
u64 itir;
u64 pte;
u64 rr;
}; /*Record for tr entry!*/
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
region register macros
*/
#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK 0x0000000000000001L
#define RR_VE_SHIFT 0
#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK 0x00000000000000fcL
#define RR_PS_SHIFT 2
#define RR_RID_MASK 0x00000000ffffff00L
#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
......
@@ -305,8 +305,8 @@ local_flush_tlb_all (void)
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
static void
__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
@@ -343,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (unlikely(end - start >= 1024*1024*1024*1024UL
|| REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
/*
* If we flush more than a tera-byte or across regions, we're
* probably better off just flushing the entire TLB(s). This
* should be very rare and is not worth optimizing for.
*/
flush_tlb_all();
} else {
/* flush the address range from the tlb */
__flush_tlb_range(vma, start, end);
/* flush the virt. page-table area mapping the addr range */
__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
}
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
......
@@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task)
		cpu_relax();
}

void sn_tlb_migrate_finish(struct mm_struct *mm)
{
	/* flush_tlb_mm is inefficient if more than 1 users of mm */
	if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
		flush_tlb_mm(mm);
}

static void
sn2_ipi_flush_all_tlb(struct mm_struct *mm)
{
......
@@ -28,6 +28,7 @@ config M68K
	select OLD_SIGSUSPEND3
	select OLD_SIGACTION
	select ARCH_DISCARD_MEMBLOCK
	select MMU_GATHER_NO_RANGE if MMU

config CPU_BIG_ENDIAN
	def_bool y
......
@@ -2,20 +2,6 @@
#ifndef _M68K_TLB_H
#define _M68K_TLB_H
/*
* m68k doesn't need any special per-pte or
* per-vma handling..
*/
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
/*
* .. because we flush the whole mm when it
* fills up.
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>

#endif /* _M68K_TLB_H */
@@ -41,6 +41,7 @@ config MICROBLAZE
	select TRACING_SUPPORT
	select VIRT_TO_BUS
	select CPU_NO_EFFICIENT_FFS
	select MMU_GATHER_NO_RANGE if MMU

# Endianness selection
choice
......
@@ -11,16 +11,7 @@
#ifndef _ASM_MICROBLAZE_TLB_H
#define _ASM_MICROBLAZE_TLB_H

#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)

#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#endif

#include <asm-generic/tlb.h>

#endif /* _ASM_MICROBLAZE_TLB_H */
@@ -5,23 +5,6 @@
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
/*
* MIPS doesn't need any special per-pte or per-vma handling, except
* we need to flush cache for area to be unmapped.
*/
#define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
/*
* .. because we flush the whole mm when it fills up.
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#define _UNIQUE_ENTRYHI(base, idx)					\
		(((base) + ((idx) << (PAGE_SHIFT + 1))) |		\
		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
......
@@ -4,22 +4,6 @@
#ifndef __ASMNDS32_TLB_H
#define __ASMNDS32_TLB_H
#define tlb_start_vma(tlb,vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb,vma) \
do { \
if(!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>

#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
......
@@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t * pte);

void tlb_migrate_finish(struct mm_struct *mm);

#endif
@@ -24,6 +24,7 @@ config NIOS2
	select USB_ARCH_HAS_HCD if USB_SUPPORT
	select CPU_NO_EFFICIENT_FFS
	select ARCH_DISCARD_MEMBLOCK
	select MMU_GATHER_NO_RANGE if MMU

config GENERIC_CSUM
	def_bool y
......
@@ -11,22 +11,12 @@
#ifndef _ASM_NIOS2_TLB_H
#define _ASM_NIOS2_TLB_H

#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)

extern void set_mmu_pid(unsigned long pid);

/*
 * NiosII doesn't need any special per-pte or per-vma handling, except
 * we need to flush cache for the area to be unmapped.
 */
/*
 * NIOS32 does have flush_tlb_range(), but it lacks a limit and fallback to
 * full mm invalidation. So use flush_tlb_mm() for everything.
 */

#define tlb_start_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm)				\
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)

#define tlb_end_vma(tlb, vma)	do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
......
@@ -36,6 +36,7 @@ config OPENRISC
	select OMPIC if SMP
	select ARCH_WANT_FRAME_POINTERS
	select GENERIC_IRQ_MULTI_HANDLER
	select MMU_GATHER_NO_RANGE if MMU

config CPU_BIG_ENDIAN
	def_bool y
......
@@ -20,14 +20,10 @@
#define __ASM_OPENRISC_TLB_H__

/*
 * or32 doesn't need any special per-pte or
 * per-vma handling..
 */
/*
 * OpenRISC doesn't have an efficient flush_tlb_range() so use flush_tlb_mm()
 * for everything.
 */

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
......
@@ -2,24 +2,6 @@
#ifndef _PARISC_TLB_H
#define _PARISC_TLB_H
#define tlb_flush(tlb) \
do { if ((tlb)->fullmm) \
flush_tlb_mm((tlb)->mm);\
} while (0)
#define tlb_start_vma(tlb, vma) \
do { if (!(tlb)->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb, vma) \
do { if (!(tlb)->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address) \
do { } while (0)
#include <asm-generic/tlb.h>

#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
......
@@ -218,6 +218,8 @@ config PPC
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_RCU_TABLE_FREE		if SMP
	select HAVE_RCU_TABLE_NO_INVALIDATE	if HAVE_RCU_TABLE_FREE
	select HAVE_MMU_GATHER_PAGE_SIZE
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RELIABLE_STACKTRACE		if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
	select HAVE_SYSCALL_TRACEPOINTS
......
@@ -27,8 +27,8 @@
#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)
#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
#define tlb_flush tlb_flush

extern void tlb_flush(struct mmu_gather *tlb);

/* Get the generic bits... */
@@ -46,22 +46,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
#endif
}
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size)
{
if (!tlb->page_size)
tlb->page_size = page_size;
else if (tlb->page_size != page_size) {
if (!tlb->fullmm)
tlb_flush_mmu(tlb);
/*
* update the page size after flush for the new
* mmu_gather.
*/
tlb->page_size = page_size;
}
}
#ifdef CONFIG_SMP
static inline int mm_is_core_local(struct mm_struct *mm)
{
......
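
The page-size check deleted from powerpc above is not lost either: with HAVE_MMU_GATHER_PAGE_SIZE selected, the generic gather performs the same "flush when the page size changes" step, roughly (a sketch mirroring the removed helper, not the verbatim generic code):

  #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
  static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
  						     unsigned int page_size)
  {
  	if (tlb->page_size && tlb->page_size != page_size) {
  		if (!tlb->fullmm)
  			tlb_flush_mmu(tlb);	/* mixed sizes: flush what we have */
  	}
  	tlb->page_size = page_size;
  }
  #endif
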
@@ -18,6 +18,7 @@ struct mmu_gather;

static void tlb_flush(struct mmu_gather *tlb);

#define tlb_flush tlb_flush

#include <asm-generic/tlb.h>

static inline void tlb_flush(struct mmu_gather *tlb)
......
@@ -164,11 +164,13 @@ config S390
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_MEMBLOCK_NODE_MAP
	select HAVE_MEMBLOCK_PHYS_MAP
	select HAVE_MMU_GATHER_NO_GATHER
	select HAVE_MOD_ARCH_SPECIFIC
	select HAVE_NOP_MCOUNT
	select HAVE_OPROFILE
	select HAVE_PCI
	select HAVE_PERF_EVENTS
	select HAVE_RCU_TABLE_FREE
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RSEQ
	select HAVE_SYSCALL_TRACEPOINTS
......
@@ -22,98 +22,39 @@
 * Pages used for the page tables is a different story. FIXME: more
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size);
struct mmu_gather {
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
unsigned long start, end;
};
struct mmu_table_batch {
struct rcu_head rcu;
unsigned int nr;
void *tables[0];
};
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
tlb->end = end;
tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
__tlb_flush_mm_lazy(tlb->mm);
}
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
tlb_table_flush(tlb);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
	}

	tlb_flush_mmu(tlb);
}

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	free_page_and_swap_cache(page);
	return false;
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

/*
@@ -121,8 +62,17 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_ptes = 1;
/*
* page_table_free_rcu takes care of the allocation bit masks
* of the 2K table fragments in the 4K page table page,
* then calls tlb_remove_table.
*/
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
	if (mm_pmd_folded(tlb->mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pmd);
}

@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
{
	if (mm_p4d_folded(tlb->mm))
		return;
__tlb_adjust_range(tlb, address, PAGE_SIZE);
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_p4ds = 1;
	tlb_remove_table(tlb, p4d);
}

@@ -169,21 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
{
	if (mm_pud_folded(tlb->mm))
		return;
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pud);
}
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
tlb_remove_tlb_entry(tlb, ptep, address)
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size)
{
}
#endif /* _S390_TLB_H */
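
The freed_tables/cleared_* bits that the new s390 hooks set above are the per-flush hints the generic mmu_gather keeps; architectures that flush by range use them to pick an invalidation granularity, roughly along these lines (a sketch of the generic helper, not verbatim):

  static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
  {
  	if (tlb->cleared_ptes)
  		return PAGE_SHIFT;
  	if (tlb->cleared_pmds)
  		return PMD_SHIFT;
  	if (tlb->cleared_puds)
  		return PUD_SHIFT;
  	if (tlb->cleared_p4ds)
  		return P4D_SHIFT;

  	return PAGE_SHIFT;
  }
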
@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);

@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
	}
}
static void tlb_remove_table_smp_sync(void *arg)
{
/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
* assumed to be actually RCU-freed.
*
* It is however sufficient for software page-table walkers that rely
* on IRQ disabling. See the comment near struct mmu_table_batch.
*/
smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
struct mmu_table_batch *batch;
int i;
batch = container_of(head, struct mmu_table_batch, rcu);
for (i = 0; i < batch->nr; i++)
__tlb_remove_table(batch->tables[i]);
free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
tlb->mm->context.flush_mm = 1;
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)
__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
__tlb_flush_mm_lazy(tlb->mm);
tlb_remove_table_one(table);
return;
}
(*batch)->nr = 0;
}
(*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH)
tlb_flush_mmu(tlb);
}
/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
......
@@ -70,6 +70,15 @@ do {							\
	tlb_remove_page((tlb), (pte));			\
} while (0)
#if CONFIG_PGTABLE_LEVELS > 2
#define __pmd_free_tlb(tlb, pmdp, addr) \
do { \
struct page *page = virt_to_page(pmdp); \
pgtable_pmd_page_dtor(page); \
tlb_remove_page((tlb), page); \
} while (0);
#endif
static inline void check_pgt_cache(void)
{
	quicklist_trim(QUICK_PT, NULL, 25, 16);
......
@@ -11,133 +11,8 @@
#ifdef CONFIG_MMU
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#include <asm-generic/tlb.h>

/*
* TLB handling. This allows us to remove pages from the page
* tables, and efficiently handle the TLB issues.
*/
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
unsigned long start, end;
};
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
tlb->start = TASK_SIZE;
tlb->end = 0;
if (tlb->fullmm) {
tlb->start = 0;
tlb->end = TASK_SIZE;
}
}
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
tlb->end = end;
tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force)
{
if (tlb->fullmm || force)
flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
check_pgt_cache();
}
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
if (tlb->start > address)
tlb->start = address;
if (tlb->end < address + PAGE_SIZE)
tlb->end = address + PAGE_SIZE;
}
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
tlb_remove_tlb_entry(tlb, ptep, address)
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm)
flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm && tlb->end) {
flush_tlb_range(vma, tlb->start, tlb->end);
init_tlb_gather(tlb);
}
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
}
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
free_page_and_swap_cache(page);
return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
__tlb_remove_page(tlb, page);
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return tlb_remove_page(tlb, page);
}
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size)
{
}
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);

@@ -157,11 +32,6 @@ static inline void tlb_unwire_entry(void)

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
......
@@ -63,6 +63,7 @@ config SPARC64
	select HAVE_KRETPROBES
	select HAVE_KPROBES
	select HAVE_RCU_TABLE_FREE if SMP
	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
	select HAVE_MEMBLOCK_NODE_MAP
	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select HAVE_DYNAMIC_FTRACE
......
@@ -2,24 +2,6 @@
#ifndef _SPARC_TLB_H
#define _SPARC_TLB_H
#define tlb_start_vma(tlb, vma) \
do { \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb, vma) \
do { \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address) \
do { } while (0)
#define tlb_flush(tlb) \
do { \
flush_tlb_mm((tlb)->mm); \
} while (0)
#include <asm-generic/tlb.h>

#endif /* _SPARC_TLB_H */
...@@ -2,162 +2,8 @@ ...@@ -2,162 +2,8 @@
#ifndef __UM_TLB_H #ifndef __UM_TLB_H
#define __UM_TLB_H #define __UM_TLB_H
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm-generic/cacheflush.h>
#define tlb_start_vma(tlb, vma) do { } while (0) #include <asm-generic/tlb.h>
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
/* struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page.
*/
struct mmu_gather {
struct mm_struct *mm;
unsigned int need_flush; /* Really unmapped some ptes? */
unsigned long start;
unsigned long end;
unsigned int fullmm; /* non-zero means full mm flush */
};
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
unsigned long address)
{
if (tlb->start > address)
tlb->start = address;
if (tlb->end < address + PAGE_SIZE)
tlb->end = address + PAGE_SIZE;
}
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
tlb->need_flush = 0;
tlb->start = TASK_SIZE;
tlb->end = 0;
if (tlb->fullmm) {
tlb->start = 0;
tlb->end = TASK_SIZE;
}
}
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
tlb->end = end;
tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}
static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
init_tlb_gather(tlb);
}
static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
if (!tlb->need_flush)
return;
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
/* arch_tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force)
{
if (force) {
tlb->start = start;
tlb->end = end;
tlb->need_flush = 1;
}
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
}
/* tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
* while handling the additional races in SMP caused by other CPUs
* caching valid mappings in their TLBs.
*/
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
tlb->need_flush = 1;
free_page_and_swap_cache(page);
return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
__tlb_remove_page(tlb, page);
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return __tlb_remove_page(tlb, page);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
return tlb_remove_page(tlb, page);
}
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
* Record the fact that pte's were really umapped in ->need_flush, so we can
* later optimise away the tlb invalidate. This helps when userspace is
* unmapping already-unmapped pages, which happens quite a lot.
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
tlb_remove_tlb_entry(tlb, ptep, address)
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size)
{
}
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define tlb_migrate_finish(mm) do {} while (0)
#endif #endif
...@@ -20,6 +20,7 @@ config UNICORE32 ...@@ -20,6 +20,7 @@ config UNICORE32
select GENERIC_IOMAP select GENERIC_IOMAP
select MODULES_USE_ELF_REL select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE select NEED_DMA_MAP_STATE
select MMU_GATHER_NO_RANGE if MMU
help help
UniCore-32 is 32-bit Instruction Set Architecture, UniCore-32 is 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip including a series of low-power-consumption RISC chip
......
...@@ -12,10 +12,9 @@ ...@@ -12,10 +12,9 @@
#ifndef __UNICORE_TLB_H__ #ifndef __UNICORE_TLB_H__
#define __UNICORE_TLB_H__ #define __UNICORE_TLB_H__
#define tlb_start_vma(tlb, vma) do { } while (0) /*
#define tlb_end_vma(tlb, vma) do { } while (0) * unicore32 lacks an efficient flush_tlb_range(), use flush_tlb_mm().
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) */
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#define __pte_free_tlb(tlb, pte, addr) \ #define __pte_free_tlb(tlb, pte, addr) \
do { \ do { \
......
...@@ -183,7 +183,6 @@ config X86 ...@@ -183,7 +183,6 @@ config X86
select HAVE_PERF_REGS select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if PARAVIRT select HAVE_RCU_TABLE_FREE if PARAVIRT
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ARG_ACCESS_API
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define tlb_end_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb); static inline void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h> #include <asm-generic/tlb.h>
......
...@@ -14,32 +14,6 @@ ...@@ -14,32 +14,6 @@
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/page.h> #include <asm/page.h>
#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
/* Note, read http://lkml.org/lkml/2004/1/15/6 */
# define tlb_start_vma(tlb,vma) do { } while (0)
# define tlb_end_vma(tlb,vma) do { } while (0)
#else
# define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
# define tlb_end_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
#endif
#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h> #include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
......
...@@ -19,9 +19,131 @@ ...@@ -19,9 +19,131 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/*
* Generic MMU-gather implementation.
*
* The mmu_gather data structure is used by the mm code to implement the
* correct and efficient ordering of freeing pages and TLB invalidations.
*
* This correct ordering is:
*
* 1) unhook page
* 2) TLB invalidate page
* 3) free page
*
* That is, we must never free a page before we have ensured there are no live
* translations left to it. Otherwise it might be possible to observe (or
* worse, change) the page content after it has been reused.
*
* The mmu_gather API consists of:
*
* - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
*
* Finish in particular will issue a (final) TLB invalidate and free
* all (remaining) queued pages.
*
* - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
*
* Defaults to flushing at tlb_end_vma() to reset the range; helps when
* there are large holes between the VMAs.
*
* - tlb_remove_page() / __tlb_remove_page()
* - tlb_remove_page_size() / __tlb_remove_page_size()
*
* __tlb_remove_page_size() is the basic primitive that queues a page for
* freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
* boolean indicating if the queue is (now) full and a call to
* tlb_flush_mmu() is required.
*
* tlb_remove_page() and tlb_remove_page_size() imply the call to
* tlb_flush_mmu() when required and have no return value.
*
* - tlb_change_page_size()
*
* Call before __tlb_remove_page*() to set the current page-size; implies a
* possible tlb_flush_mmu() call.
*
* - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
*
* tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
* related state, like the range)
*
* tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
* whatever pages are still batched.
*
* - mmu_gather::fullmm
*
* A flag set by tlb_gather_mmu() to indicate we're going to free
* the entire mm; this allows a number of optimizations.
*
* - We can ignore tlb_{start,end}_vma(), because we don't
* care about ranges. Everything will be shot down.
*
* - (RISC) architectures that use ASIDs can cycle to a new ASID
* and delay the invalidation until ASID space runs out.
*
* - mmu_gather::need_flush_all
*
* A flag that can be set by the arch code if it wants to force
* flush the entire TLB irrespective of the range. For instance
* x86-PAE needs this when changing top-level entries.
*
* The API further allows the architecture to provide and implement tlb_flush():
*
* tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
* use of:
*
* - mmu_gather::start / mmu_gather::end
*
* which provides the range that needs to be flushed to cover the pages to
* be freed.
*
* - mmu_gather::freed_tables
*
* set when we freed page table pages
*
* - tlb_get_unmap_shift() / tlb_get_unmap_size()
*
* returns the smallest TLB entry size unmapped in this range.
*
* If an architecture does not provide tlb_flush(), a default implementation
* based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
* specified, in which case we'll default to flush_tlb_mm().
*
* Additionally there are a few opt-in features:
*
* HAVE_MMU_GATHER_PAGE_SIZE
*
* This ensures we call tlb_flush() every time tlb_change_page_size() actually
* changes the size and provides mmu_gather::page_size to tlb_flush().
*
* HAVE_RCU_TABLE_FREE
*
* This provides tlb_remove_table(), to be used instead of tlb_remove_page()
* for page directories (__p*_free_tlb()). This provides separate freeing of
* the page-table pages themselves in a semi-RCU fashion (see comment below).
* Useful if your architecture doesn't use IPIs for remote TLB invalidates
* and therefore doesn't naturally serialize with software page-table walkers.
*
* When used, an architecture is expected to provide __tlb_remove_table()
* which does the actual freeing of these pages.
*
* HAVE_RCU_TABLE_NO_INVALIDATE
*
* This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
* freeing the page-table pages. The invalidate can be skipped when you use
* HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
* page-tables natively.
*
* MMU_GATHER_NO_RANGE
*
* Use this if your architecture lacks an efficient flush_tlb_range().
*/
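For orientation, here is a minimal sketch of how an unmap path is expected to drive the API described in the comment above. It is illustrative only and not part of this patch; example_unmap_range() and example_clear_pte() are hypothetical names, and locking is omitted.

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical helper: clears one PTE, calls tlb_remove_tlb_entry() and
 * returns the page that was mapped there (or NULL). Not a real kernel API. */
static struct page *example_clear_pte(struct vm_area_struct *vma,
				      struct mmu_gather *tlb,
				      unsigned long addr);

static void example_unmap_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	unsigned long addr;

	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);	/* start the gather */
	tlb_start_vma(&tlb, vma);		/* flush caches, record VMA flags */
	tlb_change_page_size(&tlb, PAGE_SIZE);	/* unmapping base pages */

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = example_clear_pte(vma, &tlb, addr);

		if (page)
			tlb_remove_page(&tlb, page);	/* queue; may flush when full */
	}

	tlb_end_vma(&tlb, vma);			/* TLB invalidate this VMA's range */
	tlb_finish_mmu(&tlb, start, end);	/* final flush, free queued pages */
}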
#ifdef CONFIG_HAVE_RCU_TABLE_FREE #ifdef CONFIG_HAVE_RCU_TABLE_FREE
/* /*
* Semi RCU freeing of the page directories. * Semi RCU freeing of the page directories.
...@@ -60,11 +182,11 @@ struct mmu_table_batch { ...@@ -60,11 +182,11 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \ #define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table); extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#endif #endif
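The HAVE_RCU_TABLE_FREE description above expects architectures that select it to route page-table pages through tlb_remove_table() and to supply __tlb_remove_table() for the actual free. A hedged sketch of the arch side, assuming a page-backed page table; the exact destructor work is arch specific and this is not code from the patch.

/* Hypothetical arch/<arch>/include/asm/tlb.h fragment. */
static inline void __tlb_remove_table(void *table)
{
	/* here 'table' is the struct page backing the page table */
	free_page_and_swap_cache((struct page *)table);
}

#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_table((tlb), (pte))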
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/* /*
* If we can't allocate a page to make a big batch of page pointers * If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure. * to work on, then just handle a few from the on-stack structure.
...@@ -89,14 +211,21 @@ struct mmu_gather_batch { ...@@ -89,14 +211,21 @@ struct mmu_gather_batch {
*/ */
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
/* struct mmu_gather is an opaque type used by the mm code for passing around extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
#endif
/*
* struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page. * any data needed by arch specific code for tlb_remove_page.
*/ */
struct mmu_gather { struct mmu_gather {
struct mm_struct *mm; struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE #ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch; struct mmu_table_batch *batch;
#endif #endif
unsigned long start; unsigned long start;
unsigned long end; unsigned long end;
/* /*
...@@ -124,23 +253,30 @@ struct mmu_gather { ...@@ -124,23 +253,30 @@ struct mmu_gather {
unsigned int cleared_puds : 1; unsigned int cleared_puds : 1;
unsigned int cleared_p4ds : 1; unsigned int cleared_p4ds : 1;
/*
* tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
*/
unsigned int vma_exec : 1;
unsigned int vma_huge : 1;
unsigned int batch_count;
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active; struct mmu_gather_batch *active;
struct mmu_gather_batch local; struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE]; struct page *__pages[MMU_GATHER_BUNDLE];
unsigned int batch_count;
int page_size;
};
#define HAVE_GENERIC_MMU_GATHER #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
unsigned int page_size;
#endif
#endif
};
void arch_tlb_gather_mmu(struct mmu_gather *tlb, void arch_tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm, unsigned long start, unsigned long end); struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb, void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force); unsigned long start, unsigned long end, bool force);
void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb, static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address, unsigned long address,
...@@ -163,8 +299,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) ...@@ -163,8 +299,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->cleared_pmds = 0; tlb->cleared_pmds = 0;
tlb->cleared_puds = 0; tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0; tlb->cleared_p4ds = 0;
/*
* Do not reset mmu_gather::vma_* fields here, we do not
* call into tlb_start_vma() again to set them if there is an
* intermediate flush.
*/
}
#ifdef CONFIG_MMU_GATHER_NO_RANGE
#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif
/*
* When an architecture does not have an efficient way of flushing a range of
* TLB entries, there is no point in doing intermediate flushes at tlb_end_vma()
* to keep the range small. We equally don't have to worry about page
* granularity or other such details.
*
* All we need to do is issue a full flush for any !0 range.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->end)
flush_tlb_mm(tlb->mm);
}
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif
/*
* When an architecture does not provide its own tlb_flush() implementation
* but does have a reasonably efficient flush_tlb_range() implementation
* use that.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || tlb->need_flush_all) {
flush_tlb_mm(tlb->mm);
} else if (tlb->end) {
struct vm_area_struct vma = {
.vm_mm = tlb->mm,
.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
(tlb->vma_huge ? VM_HUGETLB : 0),
};
flush_tlb_range(&vma, tlb->start, tlb->end);
}
} }
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
/*
* flush_tlb_range() implementations that look at VM_HUGETLB (tile,
* mips-4k) flush only large pages.
*
* flush_tlb_range() implementations that flush I-TLB also flush D-TLB
* (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
* range.
*
* We rely on tlb_end_vma() to issue a flush, such that when we reset
* these values the batch is empty.
*/
tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
#else
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
#endif /* CONFIG_MMU_GATHER_NO_RANGE */
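For contrast with the two generic tlb_flush() variants above, a hedged sketch of an architecture overriding tlb_flush() from its own asm/tlb.h, following the forward-declaration pattern this series uses elsewhere; myarch_flush_range() is a hypothetical stand-in for whatever range-flush primitive the architecture actually has.

/* Hypothetical arch/<arch>/include/asm/tlb.h */
#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

/* Hypothetical arch primitive, not a real kernel API. */
extern void myarch_flush_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end, unsigned long shift);

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
		flush_tlb_mm(tlb->mm);
	else if (tlb->end)
		myarch_flush_range(tlb->mm, tlb->start, tlb->end,
				   tlb_get_unmap_shift(tlb));
}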
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{ {
if (!tlb->end) if (!tlb->end)
...@@ -196,21 +418,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) ...@@ -196,21 +418,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE); return tlb_remove_page_size(tlb, page, PAGE_SIZE);
} }
#ifndef tlb_remove_check_page_size_change static inline void tlb_change_page_size(struct mmu_gather *tlb,
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size) unsigned int page_size)
{ {
/* #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
* We don't care about page size change, just update if (tlb->page_size && tlb->page_size != page_size) {
* mmu_gather page size here so that debug checks if (!tlb->fullmm)
* doesn't throw false warning. tlb_flush_mmu(tlb);
*/ }
#ifdef CONFIG_DEBUG_VM
tlb->page_size = page_size; tlb->page_size = page_size;
#endif #endif
} }
#endif
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{ {
...@@ -237,17 +456,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) ...@@ -237,17 +456,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* the vmas are adjusted to only cover the region to be torn down. * the vmas are adjusted to only cover the region to be torn down.
*/ */
#ifndef tlb_start_vma #ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0) static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
#endif {
if (tlb->fullmm)
return;
#define __tlb_end_vma(tlb, vma) \ tlb_update_vma_flags(tlb, vma);
do { \ flush_cache_range(vma, vma->vm_start, vma->vm_end);
if (!tlb->fullmm) \ }
tlb_flush_mmu_tlbonly(tlb); \ #endif
} while (0)
#ifndef tlb_end_vma #ifndef tlb_end_vma
#define tlb_end_vma __tlb_end_vma static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
/*
* Do a TLB flush and reset the range at VMA boundaries; this avoids
* the ranges growing with the unused space between consecutive VMAs.
* The mmu_gather::vma_* flags set in tlb_start_vma() also rely on
* this flush happening here.
*/
tlb_flush_mmu_tlbonly(tlb);
}
#endif #endif
#ifndef __tlb_remove_tlb_entry #ifndef __tlb_remove_tlb_entry
...@@ -372,6 +604,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) ...@@ -372,6 +604,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
#define tlb_migrate_finish(mm) do {} while (0)
#endif /* _ASM_GENERIC__TLB_H */ #endif /* _ASM_GENERIC__TLB_H */
...@@ -55,7 +55,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size, ...@@ -55,7 +55,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size,
* *
* MEMREMAP_WB - matches the default mapping for System RAM on * MEMREMAP_WB - matches the default mapping for System RAM on
* the architecture. This is usually a read-allocate write-back cache. * the architecture. This is usually a read-allocate write-back cache.
* Morever, if MEMREMAP_WB is specified and the requested remap region is RAM * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
* memremap() will bypass establishing a new mapping and instead return * memremap() will bypass establishing a new mapping and instead return
* a pointer into the direct map. * a pointer into the direct map.
* *
...@@ -86,7 +86,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags) ...@@ -86,7 +86,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
/* Try all mapping types requested until one returns non-NULL */ /* Try all mapping types requested until one returns non-NULL */
if (flags & MEMREMAP_WB) { if (flags & MEMREMAP_WB) {
/* /*
* MEMREMAP_WB is special in that it can be satisifed * MEMREMAP_WB is special in that it can be satisfied
* from the direct map. Some archs depend on the * from the direct map. Some archs depend on the
* capability of memremap() to autodetect cases where * capability of memremap() to autodetect cases where
* the requested range is potentially in System RAM. * the requested range is potentially in System RAM.
......
...@@ -520,21 +520,20 @@ EXPORT_SYMBOL_GPL(page_is_ram); ...@@ -520,21 +520,20 @@ EXPORT_SYMBOL_GPL(page_is_ram);
int region_intersects(resource_size_t start, size_t size, unsigned long flags, int region_intersects(resource_size_t start, size_t size, unsigned long flags,
unsigned long desc) unsigned long desc)
{ {
resource_size_t end = start + size - 1; struct resource res;
int type = 0; int other = 0; int type = 0; int other = 0;
struct resource *p; struct resource *p;
res.start = start;
res.end = start + size - 1;
read_lock(&resource_lock); read_lock(&resource_lock);
for (p = iomem_resource.child; p ; p = p->sibling) { for (p = iomem_resource.child; p ; p = p->sibling) {
bool is_type = (((p->flags & flags) == flags) && bool is_type = (((p->flags & flags) == flags) &&
((desc == IORES_DESC_NONE) || ((desc == IORES_DESC_NONE) ||
(desc == p->desc))); (desc == p->desc)));
if (start >= p->start && start <= p->end) if (resource_overlaps(p, &res))
is_type ? type++ : other++;
if (end >= p->start && end <= p->end)
is_type ? type++ : other++;
if (p->start >= start && p->end <= end)
is_type ? type++ : other++; is_type ? type++ : other++;
} }
read_unlock(&resource_lock); read_unlock(&resource_lock);
......
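region_intersects() now defers the range test to resource_overlaps(). Its definition is outside this diff, but for a closed-interval [start, end] representation it is expected to reduce to the usual overlap check, roughly as in this sketch (an assumption, not the authoritative helper; see include/linux/ioport.h):

#include <linux/ioport.h>

/* Sketch of the expected semantics of resource_overlaps(). */
static inline bool example_resource_overlaps(struct resource *r1,
					     struct resource *r2)
{
	return r1->start <= r2->end && r1->end >= r2->start;
}

With res spanning start .. start + size - 1, a single overlap test per resource replaces the three positional checks removed above.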
...@@ -1151,7 +1151,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, ...@@ -1151,7 +1151,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
/* Need help from migration thread: drop lock and wait. */ /* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, p, &rf); task_rq_unlock(rq, p, &rf);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
return 0; return 0;
} else if (task_on_rq_queued(p)) { } else if (task_on_rq_queued(p)) {
/* /*
......
...@@ -1677,7 +1677,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, ...@@ -1677,7 +1677,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct mm_struct *mm = tlb->mm; struct mm_struct *mm = tlb->mm;
bool ret = false; bool ret = false;
tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
ptl = pmd_trans_huge_lock(pmd, vma); ptl = pmd_trans_huge_lock(pmd, vma);
if (!ptl) if (!ptl)
...@@ -1753,7 +1753,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, ...@@ -1753,7 +1753,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t orig_pmd; pmd_t orig_pmd;
spinlock_t *ptl; spinlock_t *ptl;
tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
ptl = __pmd_trans_huge_lock(pmd, vma); ptl = __pmd_trans_huge_lock(pmd, vma);
if (!ptl) if (!ptl)
......
...@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, ...@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* This is a hugetlb vma, all the pte entries should point * This is a hugetlb vma, all the pte entries should point
* to huge page. * to huge page.
*/ */
tlb_remove_check_page_size_change(tlb, sz); tlb_change_page_size(tlb, sz);
tlb_start_vma(tlb, vma); tlb_start_vma(tlb, vma);
/* /*
......
...@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, ...@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd)) if (pmd_trans_unstable(pmd))
return 0; return 0;
tlb_remove_check_page_size_change(tlb, PAGE_SIZE); tlb_change_page_size(tlb, PAGE_SIZE);
orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
flush_tlb_batched_pending(mm); flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode(); arch_enter_lazy_mmu_mode();
......
...@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb, ...@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb,
* We add page table cache pages with PAGE_SIZE, * We add page table cache pages with PAGE_SIZE,
* (see pte_free_tlb()), flush the tlb if we need * (see pte_free_tlb()), flush the tlb if we need
*/ */
tlb_remove_check_page_size_change(tlb, PAGE_SIZE); tlb_change_page_size(tlb, PAGE_SIZE);
pgd = pgd_offset(tlb->mm, addr); pgd = pgd_offset(tlb->mm, addr);
do { do {
next = pgd_addr_end(addr, end); next = pgd_addr_end(addr, end);
...@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, ...@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_t *pte; pte_t *pte;
swp_entry_t entry; swp_entry_t entry;
tlb_remove_check_page_size_change(tlb, PAGE_SIZE); tlb_change_page_size(tlb, PAGE_SIZE);
again: again:
init_rss_vec(rss); init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
...@@ -1155,7 +1155,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, ...@@ -1155,7 +1155,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
*/ */
if (force_flush) { if (force_flush) {
force_flush = 0; force_flush = 0;
tlb_flush_mmu_free(tlb); tlb_flush_mmu(tlb);
if (addr != end) if (addr != end)
goto again; goto again;
} }
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlb.h> #include <asm/tlb.h>
#ifdef HAVE_GENERIC_MMU_GATHER #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
static bool tlb_next_batch(struct mmu_gather *tlb) static bool tlb_next_batch(struct mmu_gather *tlb)
{ {
...@@ -41,35 +41,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb) ...@@ -41,35 +41,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
return true; return true;
} }
void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, static void tlb_batch_pages_flush(struct mmu_gather *tlb)
unsigned long start, unsigned long end)
{
tlb->mm = mm;
/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb->active = &tlb->local;
tlb->batch_count = 0;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
#endif
tlb->page_size = 0;
__tlb_reset_range(tlb);
}
void tlb_flush_mmu_free(struct mmu_gather *tlb)
{ {
struct mmu_gather_batch *batch; struct mmu_gather_batch *batch;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr); free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0; batch->nr = 0;
...@@ -77,31 +52,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb) ...@@ -77,31 +52,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb)
tlb->active = &tlb->local; tlb->active = &tlb->local;
} }
void tlb_flush_mmu(struct mmu_gather *tlb) static void tlb_batch_list_free(struct mmu_gather *tlb)
{
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force)
{ {
struct mmu_gather_batch *batch, *next; struct mmu_gather_batch *batch, *next;
if (force) {
__tlb_reset_range(tlb);
__tlb_adjust_range(tlb, start, end - start);
}
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
for (batch = tlb->local.next; batch; batch = next) { for (batch = tlb->local.next; batch; batch = next) {
next = batch->next; next = batch->next;
free_pages((unsigned long)batch, 0); free_pages((unsigned long)batch, 0);
...@@ -109,19 +63,15 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb, ...@@ -109,19 +63,15 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
tlb->local.next = NULL; tlb->local.next = NULL;
} }
/* __tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs. Returns the number of free page slots left.
* When out of page slots we must call tlb_flush_mmu().
*returns true if the caller should flush.
*/
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{ {
struct mmu_gather_batch *batch; struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end); VM_BUG_ON(!tlb->end);
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
VM_WARN_ON(tlb->page_size != page_size); VM_WARN_ON(tlb->page_size != page_size);
#endif
batch = tlb->active; batch = tlb->active;
/* /*
...@@ -139,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ ...@@ -139,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
return false; return false;
} }
#endif /* HAVE_GENERIC_MMU_GATHER */ #endif /* CONFIG_HAVE_MMU_GATHER_NO_GATHER */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE #ifdef CONFIG_HAVE_RCU_TABLE_FREE
...@@ -152,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ ...@@ -152,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
*/ */
static inline void tlb_table_invalidate(struct mmu_gather *tlb) static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{ {
#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE #ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
/* /*
* Invalidate page-table caches used by hardware walkers. Then we still * Invalidate page-table caches used by hardware walkers. Then we still
* need to RCU-sched wait while freeing the pages because software * need to RCU-sched wait while freeing the pages because software
...@@ -193,7 +143,7 @@ static void tlb_remove_table_rcu(struct rcu_head *head) ...@@ -193,7 +143,7 @@ static void tlb_remove_table_rcu(struct rcu_head *head)
free_page((unsigned long)batch); free_page((unsigned long)batch);
} }
void tlb_table_flush(struct mmu_gather *tlb) static void tlb_table_flush(struct mmu_gather *tlb)
{ {
struct mmu_table_batch **batch = &tlb->batch; struct mmu_table_batch **batch = &tlb->batch;
...@@ -225,6 +175,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) ...@@ -225,6 +175,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb_batch_pages_flush(tlb);
#endif
}
void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
/** /**
* tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
* @tlb: the mmu_gather structure to initialize * @tlb: the mmu_gather structure to initialize
...@@ -240,10 +206,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) ...@@ -240,10 +206,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
arch_tlb_gather_mmu(tlb, mm, start, end); tlb->mm = mm;
/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb->need_flush_all = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb->active = &tlb->local;
tlb->batch_count = 0;
#endif
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
tlb->page_size = 0;
#endif
__tlb_reset_range(tlb);
inc_tlb_flush_pending(tlb->mm); inc_tlb_flush_pending(tlb->mm);
} }
/**
* tlb_finish_mmu - finish an mmu_gather structure
* @tlb: the mmu_gather structure to finish
* @start: start of the region that will be removed from the page-table
* @end: end of the region that will be removed from the page-table
*
* Called at the end of the shootdown operation to free up any resources that
* were required.
*/
void tlb_finish_mmu(struct mmu_gather *tlb, void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
...@@ -254,8 +250,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb, ...@@ -254,8 +250,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
* the TLB by observing pte_none|!pte_dirty, for example so flush TLB * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
* forcefully if we detect parallel PTE batching threads. * forcefully if we detect parallel PTE batching threads.
*/ */
bool force = mm_tlb_flush_nested(tlb->mm); if (mm_tlb_flush_nested(tlb->mm)) {
__tlb_reset_range(tlb);
__tlb_adjust_range(tlb, start, end - start);
}
arch_tlb_finish_mmu(tlb, start, end, force); tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb_batch_list_free(tlb);
#endif
dec_tlb_flush_pending(tlb->mm); dec_tlb_flush_pending(tlb->mm);
} }
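As a usage note, not taken from this patch: callers pair tlb_gather_mmu() and tlb_finish_mmu() exactly as before, and passing the 0 .. -1 range makes the !(start | (end + 1)) test above set fullmm, which is what a whole-address-space teardown wants. A hypothetical sketch; example_unmap_all_vmas() does not exist:

#include <asm/tlb.h>

/* Hypothetical helper that queues the pages of every VMA in the mm. */
static void example_unmap_all_vmas(struct mmu_gather *tlb, struct mm_struct *mm);

static void example_exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0, -1);	/* 0..-1 => tlb.fullmm == 1 */
	example_unmap_all_vmas(&tlb, mm);	/* queue pages for every VMA */
	tlb_finish_mmu(&tlb, 0, -1);		/* final flush, free batches */
}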