Commit 825c43f5 authored by Ard Biesheuvel, committed by Linus Torvalds

kmap_local: don't assume kmap PTEs are linear arrays in memory

The kmap_local conversion broke the ARM architecture, because the new
code assumes that all PTEs used for creating kmaps form a linear array
in memory, and uses array indexing to look up the kmap PTE belonging to
a certain kmap index.

On ARM, this cannot work, not only because the PTE pages may be
non-adjacent in memory, but also because ARM/!LPAE interleaves hardware
entries and extended entries (carrying software-only bits) in a way that
is not compatible with array indexing.

Fortunately, this only seems to affect configurations with more than 8
CPUs, due to the way the per-CPU kmap slots are organized in memory.

Work around this by permitting an architecture to set a Kconfig symbol
that signifies that the kmap PTEs do not form a linear array in memory,
and so the only way to locate the appropriate one is to walk the page
tables.

Link: https://lore.kernel.org/linux-arm-kernel/20211026131249.3731275-1-ardb@kernel.org/
Link: https://lkml.kernel.org/r/20211116094737.7391-1-ardb@kernel.org
Fixes: 2a15ba82 ("ARM: highmem: Switch to generic kmap atomic")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reported-by: Quanyang Wang <quanyang.wang@windriver.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d78f3853
...@@ -1463,6 +1463,7 @@ config HIGHMEM ...@@ -1463,6 +1463,7 @@ config HIGHMEM
bool "High Memory Support" bool "High Memory Support"
depends on MMU depends on MMU
select KMAP_LOCAL select KMAP_LOCAL
select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
help help
The address space of ARM processors is only 4 Gigabytes large The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address and it has to accommodate user address space, kernel address
......
...@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS ...@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS
config KMAP_LOCAL config KMAP_LOCAL
bool bool
config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
bool
# struct io_mapping based helper. Selected by drivers that need them # struct io_mapping based helper. Selected by drivers that need them
config IO_MAPPING config IO_MAPPING
bool bool
......
...@@ -503,16 +503,22 @@ static inline int kmap_local_calc_idx(int idx) ...@@ -503,16 +503,22 @@ static inline int kmap_local_calc_idx(int idx)
static pte_t *__kmap_pte; static pte_t *__kmap_pte;
static pte_t *kmap_get_pte(void) static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
{ {
if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
/*
* Set by the arch if __kmap_pte[-idx] does not produce
* the correct entry.
*/
return virt_to_kpte(vaddr);
if (!__kmap_pte) if (!__kmap_pte)
__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
return __kmap_pte; return &__kmap_pte[-idx];
} }
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{ {
pte_t pteval, *kmap_pte = kmap_get_pte(); pte_t pteval, *kmap_pte;
unsigned long vaddr; unsigned long vaddr;
int idx; int idx;
...@@ -524,9 +530,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) ...@@ -524,9 +530,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
preempt_disable(); preempt_disable();
idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte - idx))); kmap_pte = kmap_get_pte(vaddr, idx);
BUG_ON(!pte_none(*kmap_pte));
pteval = pfn_pte(pfn, prot); pteval = pfn_pte(pfn, prot);
arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval); arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
arch_kmap_local_post_map(vaddr, pteval); arch_kmap_local_post_map(vaddr, pteval);
current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
preempt_enable(); preempt_enable();
...@@ -559,7 +566,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot); ...@@ -559,7 +566,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
void kunmap_local_indexed(void *vaddr) void kunmap_local_indexed(void *vaddr)
{ {
unsigned long addr = (unsigned long) vaddr & PAGE_MASK; unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
pte_t *kmap_pte = kmap_get_pte(); pte_t *kmap_pte;
int idx; int idx;
if (addr < __fix_to_virt(FIX_KMAP_END) || if (addr < __fix_to_virt(FIX_KMAP_END) ||
...@@ -584,8 +591,9 @@ void kunmap_local_indexed(void *vaddr) ...@@ -584,8 +591,9 @@ void kunmap_local_indexed(void *vaddr)
idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr); idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
kmap_pte = kmap_get_pte(addr, idx);
arch_kmap_local_pre_unmap(addr); arch_kmap_local_pre_unmap(addr);
pte_clear(&init_mm, addr, kmap_pte - idx); pte_clear(&init_mm, addr, kmap_pte);
arch_kmap_local_post_unmap(addr); arch_kmap_local_post_unmap(addr);
current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
kmap_local_idx_pop(); kmap_local_idx_pop();
...@@ -607,7 +615,7 @@ EXPORT_SYMBOL(kunmap_local_indexed); ...@@ -607,7 +615,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
void __kmap_local_sched_out(void) void __kmap_local_sched_out(void)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
pte_t *kmap_pte = kmap_get_pte(); pte_t *kmap_pte;
int i; int i;
/* Clear kmaps */ /* Clear kmaps */
...@@ -634,8 +642,9 @@ void __kmap_local_sched_out(void) ...@@ -634,8 +642,9 @@ void __kmap_local_sched_out(void)
idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
kmap_pte = kmap_get_pte(addr, idx);
arch_kmap_local_pre_unmap(addr); arch_kmap_local_pre_unmap(addr);
pte_clear(&init_mm, addr, kmap_pte - idx); pte_clear(&init_mm, addr, kmap_pte);
arch_kmap_local_post_unmap(addr); arch_kmap_local_post_unmap(addr);
} }
} }
...@@ -643,7 +652,7 @@ void __kmap_local_sched_out(void) ...@@ -643,7 +652,7 @@ void __kmap_local_sched_out(void)
void __kmap_local_sched_in(void) void __kmap_local_sched_in(void)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
pte_t *kmap_pte = kmap_get_pte(); pte_t *kmap_pte;
int i; int i;
/* Restore kmaps */ /* Restore kmaps */
...@@ -663,7 +672,8 @@ void __kmap_local_sched_in(void) ...@@ -663,7 +672,8 @@ void __kmap_local_sched_in(void)
/* See comment in __kmap_local_sched_out() */ /* See comment in __kmap_local_sched_out() */
idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte_at(&init_mm, addr, kmap_pte - idx, pteval); kmap_pte = kmap_get_pte(addr, idx);
set_pte_at(&init_mm, addr, kmap_pte, pteval);
arch_kmap_local_post_map(addr, pteval); arch_kmap_local_post_map(addr, pteval);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment