Commit 5af627a0 authored by Thomas Gleixner

csky/mm/highmem: Switch to generic kmap atomic

No reason to have the same code in every architecture.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Guo Ren <guoren@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095857.681196473@linutronix.de
parent 2a15ba82
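
With KMAP_LOCAL selected, the per-CPU fixmap mapping below is done by the
generic code in mm/highmem.c rather than open-coded in arch/csky. A minimal
sketch of that map path, assuming the generic implementation from this
series (simplified; debug checks are elided and helper details approximated):

	/*
	 * Sketch of the generic kmap_local map path (lives in mm/highmem.c;
	 * simplified, not the verbatim implementation).
	 */
	static pte_t *kmap_pte;	/* PTE for FIX_KMAP_BEGIN, assumed set up at init */

	static void *kmap_local_map_sketch(struct page *page, pgprot_t prot)
	{
		pte_t pteval = mk_pte(page, prot);
		/* each CPU owns KM_MAX_IDX consecutive fixmap slots */
		int idx = kmap_local_idx_push() + KM_MAX_IDX * smp_processor_id();
		unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

		set_pte(kmap_pte - idx, pteval);
		/* arch hook; csky maps this to kmap_flush_tlb(vaddr) below */
		arch_kmap_local_post_map(vaddr, pteval);
		return (void *)vaddr;
	}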
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -286,6 +286,7 @@ config NR_CPUS
 config HIGHMEM
 	bool "High Memory Support"
 	depends on !CPU_CK610
+	select KMAP_LOCAL
 	default y
 
 config FORCE_MAX_ZONEORDER
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
--- a/arch/csky/include/asm/fixmap.h
+++ b/arch/csky/include/asm/fixmap.h
@@ -8,7 +8,7 @@
 #include <asm/memory.h>
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #endif
 
 enum fixed_addresses {
@@ -17,7 +17,7 @@ enum fixed_addresses {
 #endif
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,
-	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
 #endif
 	__end_of_fixed_addresses
 };
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
@@ -9,7 +9,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #include <asm/cache.h>
 
 /* undef for production */
@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
 
 #define ARCH_HAS_KMAP_FLUSH_TLB
 extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
 
 #define flush_cache_kmaps() do {} while (0)
 
+#define arch_kmap_local_post_map(vaddr, pteval)	kmap_flush_tlb(vaddr)
+#define arch_kmap_local_post_unmap(vaddr)	kmap_flush_tlb(vaddr)
+
 extern void kmap_init(void);
 
 #endif /* __KERNEL__ */
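
The two new defines are the arch hooks of the generic code: they run right
after the generic implementation installs or clears a kmap PTE, preserving
the TLB flush the removed csky code did with flush_tlb_one(). Roughly, on
the unmap side (a sketch assuming the generic mm/highmem.c internals, not
the verbatim code):

	/* Sketch of the generic kmap_local unmap path (simplified). */
	static void kunmap_local_unmap_sketch(void *kvaddr)
	{
		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
		int idx = kmap_local_idx() + KM_MAX_IDX * smp_processor_id();

		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		arch_kmap_local_post_unmap(vaddr);	/* csky: kmap_flush_tlb(vaddr) */
		kmap_local_idx_pop();
	}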
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -9,8 +9,6 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-static pte_t *kmap_pte;
-
 unsigned long highstart_pfn, highend_pfn;
 
 void kmap_flush_tlb(unsigned long addr)
@@ -19,67 +17,7 @@ void kmap_flush_tlb(unsigned long addr)
 }
 EXPORT_SYMBOL(kmap_flush_tlb);
 
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	flush_tlb_one((unsigned long)vaddr);
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	int idx;
-
-	if (vaddr < FIXADDR_START)
-		return;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	pte_clear(&init_mm, vaddr, kmap_pte - idx);
-	flush_tlb_one(vaddr);
-#else
-	(void) idx; /* to kill a warning */
-#endif
-	kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
-	flush_tlb_one(vaddr);
-
-	return (void *) vaddr;
-}
-
-static void __init kmap_pages_init(void)
+void __init kmap_init(void)
 {
 	unsigned long vaddr;
 	pgd_t *pgd;
@@ -96,14 +34,3 @@ static void __init kmap_pages_init(void)
 	pte = pte_offset_kernel(pmd, vaddr);
 	pkmap_page_table = pte;
 }
-
-void __init kmap_init(void)
-{
-	unsigned long vaddr;
-
-	kmap_pages_init();
-
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
-
-	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
-}
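
Callers are unaffected by the conversion: kmap_atomic() and friends keep
their semantics, now backed by the generic implementation. An illustrative
user (not part of the patch):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* illustrative caller; its code is unchanged by this patch */
	static void copy_into_highmem_page(struct page *page, const void *src)
	{
		void *dst = kmap_atomic(page);	/* generic code picks the slot */

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);		/* generic unmap + csky TLB-flush hook */
	}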