Commit 7ac1b26b authored by Thomas Gleixner's avatar Thomas Gleixner

microblaze/mm/highmem: Switch to generic kmap atomic

No reason having the same code in every architecture.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095857.777445435@linutronix.de
parent 5af627a0
...@@ -155,6 +155,7 @@ config XILINX_UNCACHED_SHADOW ...@@ -155,6 +155,7 @@ config XILINX_UNCACHED_SHADOW
config HIGHMEM config HIGHMEM
bool "High memory support" bool "High memory support"
depends on MMU depends on MMU
select KMAP_LOCAL
help help
The address space of Microblaze processors is only 4 Gigabytes large The address space of Microblaze processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address and it has to accommodate user address space, kernel address
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include <asm/page.h> #include <asm/page.h>
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
#include <linux/threads.h> #include <linux/threads.h>
#include <asm/kmap_types.h> #include <asm/kmap_size.h>
#endif #endif
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE)) #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
...@@ -47,7 +47,7 @@ enum fixed_addresses { ...@@ -47,7 +47,7 @@ enum fixed_addresses {
FIX_HOLE, FIX_HOLE,
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * num_possible_cpus()) - 1,
#endif #endif
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
......
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table; extern pte_t *pkmap_page_table;
/* /*
...@@ -52,6 +51,11 @@ extern pte_t *pkmap_page_table; ...@@ -52,6 +51,11 @@ extern pte_t *pkmap_page_table;
#define flush_cache_kmaps() { flush_icache(); flush_dcache(); } #define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
#define arch_kmap_local_post_map(vaddr, pteval) \
local_flush_tlb_page(NULL, vaddr);
#define arch_kmap_local_post_unmap(vaddr) \
local_flush_tlb_page(NULL, vaddr);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */ #endif /* _ASM_HIGHMEM_H */
...@@ -6,4 +6,3 @@ ...@@ -6,4 +6,3 @@
obj-y := consistent.o init.o obj-y := consistent.o init.o
obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
obj-$(CONFIG_HIGHMEM) += highmem.o
// SPDX-License-Identifier: GPL-2.0
/*
* highmem.c: virtual kernel memory mappings for high memory
*
* PowerPC version, stolen from the i386 version.
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*
* Reworked for PowerPC by various contributors. Moved from
* highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
*/
#include <linux/export.h>
#include <linux/highmem.h>
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
#include <asm/tlbflush.h>
/*
 * Map a highmem page into a per-CPU fixmap slot with the given page
 * protection and return the kernel virtual address of the mapping.
 * Runs with preemption/pagefaults already disabled by the generic
 * kmap_atomic_prot() wrapper (assumed from the kernel kmap contract —
 * not visible in this file).
 */
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
/* Reserve the next free per-CPU kmap slot (stack-like allocation). */
type = kmap_atomic_idx_push();
/* Each CPU owns KM_TYPE_NR consecutive slots; pick this CPU's entry. */
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/* The slot must be empty — a non-none pte means a leaked mapping. */
BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
/* Install the mapping, then flush the stale TLB entry for vaddr. */
set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
local_flush_tlb_page(NULL, vaddr);
return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
/*
 * Tear down a mapping previously created by kmap_atomic_high_prot().
 * @kvaddr: kernel virtual address returned by the map side; addresses
 * below the fixmap kmap region are lowmem pages that were never
 * actually mapped here, so there is nothing to undo for them.
 */
void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
unsigned int idx;
/* Lowmem address: kmap_atomic() returned a direct mapping; no-op. */
if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
/* Recover the slot index for this CPU (top of the per-CPU stack). */
type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
/* Unmaps must nest LIFO: kvaddr must match the top slot exactly. */
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
/*
 * force other mappings to Oops if they'll try to access
 * this pte without first remap it
 */
pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_page(NULL, vaddr);
/* Release the slot only after the pte and TLB entry are gone. */
kmap_atomic_idx_pop();
}
EXPORT_SYMBOL(kunmap_atomic_high);
...@@ -49,17 +49,11 @@ unsigned long lowmem_size; ...@@ -49,17 +49,11 @@ unsigned long lowmem_size;
EXPORT_SYMBOL(min_low_pfn); EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn); EXPORT_SYMBOL(max_low_pfn);
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
static void __init highmem_init(void) static void __init highmem_init(void)
{ {
pr_debug("%x\n", (u32)PKMAP_BASE); pr_debug("%x\n", (u32)PKMAP_BASE);
map_page(PKMAP_BASE, 0, 0); /* XXX gross */ map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE); pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
} }
static void highmem_setup(void) static void highmem_setup(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment