Commit da4e7330 authored by Catalin Marinas, committed by Will Deacon

arm64: Clean up __flush_tlb(_kernel)_range functions

This patch moves the MAX_TLB_RANGE check into the
flush_tlb(_kernel)_range functions directly to avoid the
underscore-prefixed definitions (and for consistency with a subsequent
patch).
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent c53e0baa
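Aside (not part of the commit): the shape of the change is easier to see in isolation. Below is a minimal, self-contained userspace sketch of the pattern the patch adopts, where the size check lives inside the public range-flush function itself and early-returns to a coarser full flush. All demo_* names and the printf bodies are hypothetical stand-ins for the kernel's TLB primitives, not real APIs.

#include <stdio.h>

#define DEMO_PAGE_SHIFT		12			/* assume 4K pages */
#define DEMO_MAX_TLB_RANGE	(1024UL << DEMO_PAGE_SHIFT)

/* Stand-in for the coarse fallback (flush_tlb_mm()/flush_tlb_all()). */
static void demo_flush_all(void)
{
	puts("coarse full flush");
}

/*
 * The pattern after this patch: the public range function checks the
 * range size and early-returns to the coarse flush, instead of a thin
 * wrapper dispatching to an underscore-prefixed helper.
 */
static void demo_flush_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > DEMO_MAX_TLB_RANGE) {
		demo_flush_all();
		return;
	}

	for (addr = start >> DEMO_PAGE_SHIFT; addr < (end >> DEMO_PAGE_SHIFT); addr++)
		printf("per-page flush, page %lu\n", addr);
}

int main(void)
{
	demo_flush_range(0, 2UL << DEMO_PAGE_SHIFT);	/* small range: per-page loop */
	demo_flush_range(0, 4096UL << DEMO_PAGE_SHIFT);	/* > threshold: coarse fallback */
	return 0;
}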
arch/arm64/include/asm/tlbflush.h
@@ -91,11 +91,23 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
 	start = asid | (start >> 12);
 	end = asid | (end >> 12);
 
@@ -105,9 +117,15 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_all();
+		return;
+	}
+
 	start >>= 12;
 	end >>= 12;
 
@@ -118,29 +136,6 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	isb();
 }
 
-/*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
-		flush_tlb_all();
-}
-
 /*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
...
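For reference (not part of the commit): with arm64's default 4KB granule, PAGE_SHIFT is 12, so MAX_TLB_RANGE = 1024UL << 12 = 4 MiB, i.e. the per-page TLBI loop is bounded at 1024 iterations before the code falls back to flush_tlb_mm() or flush_tlb_all(). The >> 12 and << 48 shifts match the ARMv8 TLBI operand format, which (as I understand it) encodes VA[55:12] in the low bits of the register and the ASID in bits [63:48].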