Commit a90744ba authored by Nicholas Piggin, committed by Linus Torvalds

mm: allow arch to supply p??_free_tlb functions

The mmu_gather APIs keep track of the invalidated address range
including the span covered by invalidated page table pages.  Ranges
covered by page tables but not ptes (and therefore no TLBs) still need
to be invalidated, because some architectures (e.g. x86) cache
intermediate page table entries and invalidate those caches with the
normal TLB invalidation instructions, so as to remain almost backward
compatible.

Architectures which don't cache intermediate page table entries, or
which invalidate these caches separately from TLB invalidation, do not
require the TLB invalidation range to be expanded over page tables.

Allow architectures to supply their own p??_free_tlb functions, which
can avoid the __tlb_adjust_range() call.
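
A minimal sketch (not part of this patch) of what such an override could
look like: an architecture that does not need the range expansion could
define, in its own asm/tlb.h, a pte_free_tlb() that simply calls
__pte_free_tlb() without touching the gather range.

/* Illustrative only: arch-specific pte_free_tlb() without range expansion */
#define pte_free_tlb(tlb, ptep, address)		\
	do {						\
		__pte_free_tlb(tlb, ptep, address);	\
	} while (0)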

Link: http://lkml.kernel.org/r/20180703013131.2807-1-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: "Aneesh Kumar K. V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 02f51d45
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
  * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
  */
+#ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
+#endif
 
+#ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
+#endif
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
+#endif
 
 #ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__p4d_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
+#endif
 
 #define tlb_migrate_finish(mm) do {} while (0)
...
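
With the new #ifndef guards in place, an architecture opts out of the
generic definitions simply by defining the corresponding macro before
asm-generic/tlb.h is included; an arch's asm/tlb.h typically includes
the generic header after its own definitions. The fragment below is a
hypothetical illustration, not part of this patch:

/* arch/foo/include/asm/tlb.h -- hypothetical example, not from this patch */
#define pmd_free_tlb(tlb, pmdp, address)	__pmd_free_tlb(tlb, pmdp, address)

#include <asm-generic/tlb.h>	/* #ifndef pmd_free_tlb skips the generic version */

Architectures that define nothing keep getting the generic
pte/pmd/pud/p4d_free_tlb definitions, so existing behaviour is unchanged.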