Commit 7de9cf47 authored by Greentime Hu

nds32: Cache and TLB routines

This patch contains cache and TLB maintenance functions.
Signed-off-by: Vincent Chen <vincentc@andestech.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
parent 664eec40
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_CACHE_H__
#define __NDS32_CACHE_H__
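/*
 * The L1 caches use a 32-byte line. ARCH_DMA_MINALIGN pads DMA buffers
 * to this alignment so a non-coherent DMA transfer never shares a cache
 * line with unrelated data.
 */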
#define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* __NDS32_CACHE_H__ */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
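/*
 * Geometry of one L1 cache, kept separately for the I- and D-caches.
 * With CONFIG_CPU_CACHE_ALIASING, aliasing_num/aliasing_mask describe
 * the cache colouring used by the alias-handling code.
 */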
struct cache_info {
unsigned char ways;
unsigned char line_size;
unsigned short sets;
unsigned short size;
#if defined(CONFIG_CPU_CACHE_ALIASING)
unsigned short aliasing_num;
unsigned int aliasing_mask;
#endif
};
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_CACHEFLUSH_H__
#define __NDS32_CACHEFLUSH_H__
#include <linux/mm.h>
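/*
 * PG_arch_1 marks a page whose D-cache contents may be dirty relative
 * to memory; the flush is deferred until the page is actually mapped
 * into user space (see flush_dcache_page() and update_mmu_cache()).
 */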
#define PG_dcache_dirty PG_arch_1
#ifdef CONFIG_CPU_CACHE_ALIASING
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_dup_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn);
void flush_cache_kmaps(void);
void flush_cache_vmap(unsigned long start, unsigned long end);
void flush_cache_vunmap(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr);
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page(struct page *page);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_dcache_mmap_lock(mapping) spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) spin_unlock_irq(&(mapping)->tree_lock)
#else
#include <asm-generic/cacheflush.h>
#endif
#endif /* __NDS32_CACHEFLUSH_H__ */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_MMU_CONTEXT_H
#define __ASM_NDS32_MMU_CONTEXT_H
#include <linux/spinlock.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.id = 0;
return 0;
}
#define destroy_context(mm) do { } while (0)
#define CID_BITS 9
extern spinlock_t cid_lock;
extern unsigned int cpu_last_cid;
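/*
 * Context IDs (CIDs) live in a CID_BITS-wide field of the TLB_MISC
 * register. cpu_last_cid holds the last allocated CID plus a generation
 * count in the bits above the CID field; whenever the CID field wraps,
 * the whole TLB is flushed and stale mms lazily pick up a fresh CID.
 */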
static inline void __new_context(struct mm_struct *mm)
{
unsigned int cid;
unsigned long flags;
spin_lock_irqsave(&cid_lock, flags);
cid = cpu_last_cid;
cpu_last_cid += 1 << TLB_MISC_offCID;
if (cpu_last_cid == 0)
cpu_last_cid = 1 << TLB_MISC_offCID << CID_BITS;
if ((cid & TLB_MISC_mskCID) == 0)
flush_tlb_all();
spin_unlock_irqrestore(&cid_lock, flags);
mm->context.id = cid;
}
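/* Re-allocate the CID if this mm's CID is from an older generation. */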
static inline void check_context(struct mm_struct *mm)
{
	if (unlikely((mm->context.id ^ cpu_last_cid) >> TLB_MISC_offCID >> CID_BITS))
__new_context(mm);
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
check_context(next);
cpu_switch_mm(next);
}
}
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
#endif
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_PROCFNS_H__
#define __NDS32_PROCFNS_H__
#ifdef __KERNEL__
#include <asm/page.h>
struct mm_struct;
struct vm_area_struct;
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern void cpu_do_idle(void);
extern void cpu_reset(unsigned long reset);
extern void cpu_switch_mm(struct mm_struct *mm);
extern void cpu_dcache_inval_all(void);
extern void cpu_dcache_wbinval_all(void);
extern void cpu_dcache_inval_page(unsigned long page);
extern void cpu_dcache_wb_page(unsigned long page);
extern void cpu_dcache_wbinval_page(unsigned long page);
extern void cpu_dcache_inval_range(unsigned long start, unsigned long end);
extern void cpu_dcache_wb_range(unsigned long start, unsigned long end);
extern void cpu_dcache_wbinval_range(unsigned long start, unsigned long end);
extern void cpu_icache_inval_all(void);
extern void cpu_icache_inval_page(unsigned long page);
extern void cpu_icache_inval_range(unsigned long start, unsigned long end);
extern void cpu_cache_wbinval_page(unsigned long page, int flushi);
extern void cpu_cache_wbinval_range(unsigned long start,
unsigned long end, int flushi);
extern void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
unsigned long start,
unsigned long end, bool flushi,
bool wbd);
extern void cpu_dma_wb_range(unsigned long start, unsigned long end);
extern void cpu_dma_inval_range(unsigned long start, unsigned long end);
extern void cpu_dma_wbinval_range(unsigned long start, unsigned long end);
#endif /* __KERNEL__ */
#endif /* __NDS32_PROCFNS_H__ */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASMNDS32_TLB_H
#define __ASMNDS32_TLB_H
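/*
 * Flush the cache before a VMA is torn down and the TLB afterwards, so
 * neither stale cache lines nor stale translations survive an unmap.
 */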
#define tlb_start_vma(tlb,vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb,vma) \
do { \
	if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#endif
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_TLBFLUSH_H
#define _ASMNDS32_TLBFLUSH_H
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <nds32_intrinsic.h>
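/*
 * The TLB intrinsics used here flush either the whole TLB (flua) or a
 * single VA (inv), so flushing an entire mm falls back to a full flush.
 */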
static inline void local_flush_tlb_all(void)
{
__nds32__tlbop_flua();
__nds32__isb();
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
__nds32__tlbop_flua();
__nds32__isb();
}
static inline void local_flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
while (start < end) {
__nds32__tlbop_inv(start);
__nds32__isb();
start += PAGE_SIZE;
}
}
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_range local_flush_tlb_range
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t * pte);
void tlb_migrate_finish(struct mm_struct *mm);
#endif
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_CACHECTL
#define _ASM_CACHECTL
/*
* Options for cacheflush system call
*/
#define ICACHE 0 /* flush instruction cache */
#define DCACHE 1 /* writeback and flush data cache */
#define BCACHE 2 /* flush instruction cache + writeback and flush data cache */
#endif /* _ASM_CACHECTL */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
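/*
 * Hook into the generic cacheinfo sysfs interface. The CACHE_LINE_SIZE(),
 * CACHE_SET() and CACHE_WAY() macros, provided by the architecture
 * headers, return the geometry of the L1 I/D caches.
 */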
static void ci_leaf_init(struct cacheinfo *this_leaf,
enum cache_type type, unsigned int level)
{
char cache_type = (type & CACHE_TYPE_INST ? ICACHE : DCACHE);
this_leaf->level = level;
this_leaf->type = type;
this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
	this_leaf->number_of_sets = CACHE_SET(cache_type);
this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
this_leaf->size = this_leaf->number_of_sets *
this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
#if defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
this_leaf->attributes = CACHE_WRITE_THROUGH;
#else
this_leaf->attributes = CACHE_WRITE_BACK;
#endif
}
int init_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	/* Only one cache level, with separate I- and D-caches. */
this_cpu_ci->num_levels = 1;
this_cpu_ci->num_leaves = 2;
return 0;
}
int populate_cache_leaves(unsigned int cpu)
{
unsigned int level, idx;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
}
return 0;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
#ifndef CONFIG_CPU_CACHE_ALIASING
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t * pte)
{
struct page *page;
unsigned long pfn = pte_pfn(*pte);
unsigned long flags;
if (!pfn_valid(pfn))
return;
if (vma->vm_mm == current->active_mm) {
local_irq_save(flags);
__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
__nds32__tlbop_rwr(*pte);
__nds32__isb();
local_irq_restore(flags);
}
page = pfn_to_page(pfn);
if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
(vma->vm_flags & VM_EXEC)) {
if (!PageHighMem(page)) {
			cpu_cache_wbinval_page((unsigned long)page_address(page),
					       vma->vm_flags & VM_EXEC);
} else {
unsigned long kaddr = (unsigned long)kmap_atomic(page);
cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
kunmap_atomic((void *)kaddr);
}
}
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}
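/*
 * kremap0()/kremap1() map a physical page at one of two fixed kernel
 * virtual windows, picking an address with the same cache colour as the
 * user address so the D-cache sees only one alias. The mapping is
 * installed as a locked TLB entry (tlbop_rwlk) and torn down again by
 * kunmap01().
 */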
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
unsigned long kaddr, pte;
#define BASE_ADDR0 0xffffc000
kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
pte = (pa | PAGE_KERNEL);
__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
__nds32__tlbop_rwlk(pte);
__nds32__isb();
return kaddr;
}
static inline void kunmap01(unsigned long kaddr)
{
__nds32__tlbop_unlk(kaddr);
__nds32__tlbop_inv(kaddr);
__nds32__isb();
}
static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
unsigned long kaddr, pte;
#define BASE_ADDR1 0xffff8000
kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
pte = (pa | PAGE_KERNEL);
__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
__nds32__tlbop_rwlk(pte);
__nds32__isb();
return kaddr;
}
void flush_cache_mm(struct mm_struct *mm)
{
unsigned long flags;
local_irq_save(flags);
cpu_dcache_wbinval_all();
cpu_icache_inval_all();
local_irq_restore(flags);
}
void flush_cache_dup_mm(struct mm_struct *mm)
{
}
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
unsigned long flags;
if ((end - start) > 8 * PAGE_SIZE) {
cpu_dcache_wbinval_all();
if (vma->vm_flags & VM_EXEC)
cpu_icache_inval_all();
return;
}
local_irq_save(flags);
while (start < end) {
if (va_present(vma->vm_mm, start))
cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
start += PAGE_SIZE;
}
local_irq_restore(flags);
return;
}
void flush_cache_page(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
unsigned long vto, flags;
local_irq_save(flags);
vto = kremap0(addr, pfn << PAGE_SHIFT);
cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
kunmap01(vto);
local_irq_restore(flags);
}
void flush_cache_vmap(unsigned long start, unsigned long end)
{
cpu_dcache_wbinval_all();
cpu_icache_inval_all();
}
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
cpu_dcache_wbinval_all();
cpu_icache_inval_all();
}
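/*
 * The kernel alias of a highpage may have a different cache colour than
 * the user mapping, so write back (source) or invalidate (destination)
 * the kernel alias first, then do the actual copy or clear through
 * colour-matched kremap windows.
 */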
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
kto = ((unsigned long)page_address(to) & PAGE_MASK);
kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
pto = page_to_phys(to);
pfrom = page_to_phys(from);
if (aliasing(vaddr, (unsigned long)kfrom))
cpu_dcache_wb_page((unsigned long)kfrom);
if (aliasing(vaddr, (unsigned long)kto))
cpu_dcache_inval_page((unsigned long)kto);
local_irq_save(flags);
vto = kremap0(vaddr, pto);
vfrom = kremap1(vaddr, pfrom);
copy_page((void *)vto, (void *)vfrom);
kunmap01(vfrom);
kunmap01(vto);
local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
unsigned long vto, flags, kto;
kto = ((unsigned long)page_address(page) & PAGE_MASK);
local_irq_save(flags);
if (aliasing(kto, vaddr) && kto != 0) {
cpu_dcache_inval_page(kto);
cpu_icache_inval_page(kto);
}
vto = kremap0(vaddr, page_to_phys(page));
clear_page((void *)vto);
kunmap01(vto);
local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);
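/*
 * If the page is not currently mapped into user space, just record that
 * the D-cache is dirty and defer the flush to update_mmu_cache().
 * Otherwise write back and invalidate every colour alias of the page.
 */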
void flush_dcache_page(struct page *page)
{
struct address_space *mapping;
mapping = page_mapping(page);
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
int i, pc;
unsigned long vto, kaddr, flags;
kaddr = (unsigned long)page_address(page);
cpu_dcache_wbinval_page(kaddr);
pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
local_irq_save(flags);
for (i = 0; i < pc; i++) {
			vto = kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
cpu_dcache_wbinval_page(vto);
kunmap01(vto);
}
local_irq_restore(flags);
}
}
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
{
unsigned long line_size, start, end, vto, flags;
local_irq_save(flags);
vto = kremap0(vaddr, page_to_phys(page));
dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
line_size = L1_cache_info[DCACHE].line_size;
start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) & ~(line_size - 1);
cpu_cache_wbinval_range(start, end, 1);
}
kunmap01(vto);
local_irq_restore(flags);
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
{
unsigned long vto, flags;
local_irq_save(flags);
vto = kremap0(vaddr, page_to_phys(page));
src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
memcpy(dst, src, len);
kunmap01(vto);
local_irq_restore(flags);
}
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr)
{
unsigned long flags;
if (!PageAnon(page))
return;
if (vma->vm_mm != current->active_mm)
return;
local_irq_save(flags);
if (vma->vm_flags & VM_EXEC)
cpu_icache_inval_page(vaddr & PAGE_MASK);
cpu_dcache_wbinval_page((unsigned long)page_address(page));
local_irq_restore(flags);
}
void flush_kernel_dcache_page(struct page *page)
{
unsigned long flags;
local_irq_save(flags);
cpu_dcache_wbinval_page((unsigned long)page_address(page));
local_irq_restore(flags);
}
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long line_size, flags;
line_size = L1_cache_info[DCACHE].line_size;
start = start & ~(line_size - 1);
end = (end + line_size - 1) & ~(line_size - 1);
local_irq_save(flags);
cpu_cache_wbinval_range(start, end, 1);
local_irq_restore(flags);
}
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
unsigned long flags;
local_irq_save(flags);
cpu_cache_wbinval_page((unsigned long)page_address(page),
vma->vm_flags & VM_EXEC);
local_irq_restore(flags);
}
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t * pte)
{
struct page *page;
unsigned long flags;
unsigned long pfn = pte_pfn(*pte);
if (!pfn_valid(pfn))
return;
if (vma->vm_mm == current->active_mm) {
local_irq_save(flags);
__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
__nds32__tlbop_rwr(*pte);
__nds32__isb();
local_irq_restore(flags);
}
page = pfn_to_page(pfn);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
(vma->vm_flags & VM_EXEC)) {
local_irq_save(flags);
cpu_dcache_wbinval_page((unsigned long)page_address(page));
local_irq_restore(flags);
}
}
#endif
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/spinlock_types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/nds32.h>
#include <nds32_intrinsic.h>
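/*
 * The initial value starts the allocator at generation 1, CID 1: CID 0
 * is left for the initial mm, and a fresh mm (context.id == 0) always
 * fails the generation check and gets a new CID on its first switch_mm().
 */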
unsigned int cpu_last_cid = TLB_MISC_mskCID + (2 << TLB_MISC_offCID);
DEFINE_SPINLOCK(cid_lock);
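/*
 * Range flushes temporarily switch TLB_MISC to the target mm's CID so
 * the per-VA invalidates hit that context; for ranges above 4 MiB a
 * full TLB flush is cheaper than walking the range page by page.
 */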
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
unsigned long flags, ocid, ncid;
if ((end - start) > 0x400000) {
__nds32__tlbop_flua();
__nds32__isb();
return;
}
spin_lock_irqsave(&cid_lock, flags);
ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
__nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
while (start < end) {
__nds32__tlbop_inv(start);
__nds32__isb();
start += PAGE_SIZE;
}
__nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
spin_unlock_irqrestore(&cid_lock, flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
unsigned long flags, ocid, ncid;
spin_lock_irqsave(&cid_lock, flags);
ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
__nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
__nds32__tlbop_inv(addr);
__nds32__isb();
__nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
spin_unlock_irqrestore(&cid_lock, flags);
}