Commit ed6a7935 authored by Peter Zijlstra, committed by Ingo Molnar

asm-generic/tlb, arch: Provide CONFIG_HAVE_MMU_GATHER_PAGE_SIZE

Move the mmu_gather::page_size handling into the generic code instead of
keeping it in PowerPC-specific bits.

No change in behavior intended.
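
For reference, the caller-side pattern is unchanged apart from the rename:
declare the page size for the upcoming batch, then queue pages. A minimal
sketch (the helper below is hypothetical; only tlb_change_page_size() and
__tlb_remove_page_size() belong to the interface touched here):

	#include <asm/tlb.h>

	/*
	 * Illustrative caller, mirroring what zap_pte_range()-style code does:
	 * a size change may trigger an intermediate tlb_flush_mmu() before the
	 * page is queued into the current batch.
	 */
	static void example_unmap_one(struct mmu_gather *tlb, struct page *page)
	{
		tlb_change_page_size(tlb, PAGE_SIZE);
		if (__tlb_remove_page_size(tlb, page, PAGE_SIZE))
			tlb_flush_mmu(tlb);	/* batch full, drain and continue */
	}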
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent dea2434c
@@ -386,6 +386,9 @@ config HAVE_RCU_TABLE_FREE
 config HAVE_RCU_TABLE_INVALIDATE
 	bool
 
+config HAVE_MMU_GATHER_PAGE_SIZE
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
......
@@ -286,8 +286,7 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
 
 #define tlb_migrate_finish(mm)	do { } while (0)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
 }
......
@@ -282,8 +282,7 @@ do {							\
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	tlb_remove_tlb_entry(tlb, ptep, address)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
 }
......
@@ -218,6 +218,7 @@ config PPC
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE		if SMP
+	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
 	select HAVE_SYSCALL_TRACEPOINTS
......
@@ -27,7 +27,6 @@
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 #define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
 
 extern void tlb_flush(struct mmu_gather *tlb);
@@ -46,22 +45,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 #endif
 }
 
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
-{
-	if (!tlb->page_size)
-		tlb->page_size = page_size;
-	else if (tlb->page_size != page_size) {
-		if (!tlb->fullmm)
-			tlb_flush_mmu(tlb);
-		/*
-		 * update the page size after flush for the new
-		 * mmu_gather.
-		 */
-		tlb->page_size = page_size;
-	}
-}
-
 #ifdef CONFIG_SMP
 static inline int mm_is_core_local(struct mm_struct *mm)
 {
......
@@ -180,9 +180,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	tlb_remove_tlb_entry(tlb, ptep, address)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
+static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
 {
 }
......
@@ -127,9 +127,7 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	return tlb_remove_page(tlb, page);
 }
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
+static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
 {
 }
......
@@ -146,9 +146,7 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
 	tlb_remove_tlb_entry(tlb, ptep, address)
 
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-						     unsigned int page_size)
+static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
 {
 }
......
@@ -61,7 +61,7 @@
  *    tlb_remove_page() and tlb_remove_page_size() imply the call to
  *    tlb_flush_mmu() when required and has no return value.
  *
- *  - tlb_remove_check_page_size_change()
+ *  - tlb_change_page_size()
  *
  *    call before __tlb_remove_page*() to set the current page-size; implies a
  *    possible tlb_flush_mmu() call.
@@ -114,6 +114,11 @@
  *
  * Additionally there are a few opt-in features:
  *
+ *  HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
  *  HAVE_RCU_TABLE_FREE
  *
  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
@@ -239,11 +244,15 @@ struct mmu_gather {
 	unsigned int		cleared_puds : 1;
 	unsigned int		cleared_p4ds : 1;
 
+	unsigned int		batch_count;
+
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
-	unsigned int		batch_count;
-	int page_size;
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	unsigned int page_size;
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -309,21 +318,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
-	/*
-	 * We don't care about page size change, just update
-	 * mmu_gather page size here so that debug checks
-	 * doesn't throw false warning.
-	 */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	if (tlb->page_size && tlb->page_size != page_size) {
+		if (!tlb->fullmm)
+			tlb_flush_mmu(tlb);
+	}
+
 	tlb->page_size = page_size;
 #endif
 }
-#endif
 
 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
 {
......
@@ -1641,7 +1641,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	bool ret = false;
 
-	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
@@ -1717,7 +1717,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
......
@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	 * This is a hugetlb vma, all the pte entries should point
 	 * to huge page.
 	 */
-	tlb_remove_check_page_size_change(tlb, sz);
+	tlb_change_page_size(tlb, sz);
 	tlb_start_vma(tlb, vma);
 
 	/*
......
@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
......
@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 	 * We add page table cache pages with PAGE_SIZE,
 	 * (see pte_free_tlb()), flush the tlb if we need
 	 */
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pte_t *pte;
 	swp_entry_t entry;
 
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 again:
 	init_rss_vec(rss);
 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
......
@@ -58,7 +58,9 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	tlb->page_size = 0;
+#endif
 
 	__tlb_reset_range(tlb);
 }
@@ -121,7 +123,10 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	VM_WARN_ON(tlb->page_size != page_size);
+#endif
 
 	batch = tlb->active;
 	/*
......
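
On the architecture side, opting in is a one-line "select HAVE_MMU_GATHER_PAGE_SIZE"
(as the PowerPC hunk above does), after which mmu_gather::page_size is maintained by
the generic code and can be consulted from the architecture's tlb_flush(). A hedged
sketch, not part of this patch (the architecture, header path and flush primitive are
made up; only the config symbol and the page_size field come from the change):

	/* arch/foo/include/asm/tlb.h -- hypothetical */
	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		/*
		 * With HAVE_MMU_GATHER_PAGE_SIZE the generic code flushes on
		 * every size change, so each gather covers a single page size
		 * and tlb->page_size can select the invalidation granule.
		 */
		foo_flush_tlb_range(tlb->mm, tlb->start, tlb->end, tlb->page_size);
	}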