Commit 3f65ce4d authored by Chris Zankel, committed by Linus Torvalds

[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 5

The attached patch provides part 5 of an architecture implementation for the
Tensilica Xtensa CPU series.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 249ac17e
#
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...
obj-y := init.o fault.o tlb.o misc.o
obj-m :=
obj-n :=
obj- :=
// TODO VM_EXEC flag work-around, cache aliasing
/*
* arch/xtensa/mm/fault.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
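/* asid_cache holds the current ASID version used by the mmu_context code
 * when a new address-space ID has to be assigned to an mm.
 */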
unsigned long asid_cache = ASID_FIRST_VERSION;
void bad_page_fault(struct pt_regs*, unsigned long, int);
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* Note: does not handle Miss and MultiHit.
*/
void do_page_fault(struct pt_regs *regs)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
unsigned int exccause = regs->exccause;
unsigned int address = regs->excvaddr;
siginfo_t info;
int is_write, is_exec;
info.si_code = SEGV_MAPERR;
/* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*/
if (address >= TASK_SIZE && !user_mode(regs))
goto vmalloc_fault;
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm) {
bad_page_fault(regs, address, SIGSEGV);
return;
}
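/* Classify the faulting access from EXCCAUSE: a store cache-attribute
 * exception is a write fault; ITLB miss/privilege and instruction-fetch
 * cache-attribute exceptions are execute faults; anything else is
 * treated as a read.
 */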
is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#if 0
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else if (is_exec) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
} else /* Allow read even from write-only pages. */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
goto bad_area;
/* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
survive:
switch (handle_mm_fault(mm, vma, address, is_write)) {
case VM_FAULT_MINOR:
current->min_flt++;
break;
case VM_FAULT_MAJOR:
current->maj_flt++;
break;
case VM_FAULT_SIGBUS:
goto do_sigbus;
case VM_FAULT_OOM:
goto out_of_memory;
default:
BUG();
}
up_read(&mm->mmap_sem);
return;
/* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
current->thread.bad_vaddr = address;
current->thread.error_code = is_write;
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, current);
return;
}
bad_page_fault(regs, address, SIGSEGV);
return;
/* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (current->pid == 1) {
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
bad_page_fault(regs, address, SIGKILL);
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
current->thread.bad_vaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *) address;
force_sig_info(SIGBUS, &info, current);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS);
return;
vmalloc_fault:
{
/* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
if (act_mm == NULL)
goto bad_page_fault;
pgd = act_mm->pgd + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
goto bad_page_fault;
pgd_val(*pgd) = pgd_val(*pgd_k);
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
pmd_val(*pmd) = pmd_val(*pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto bad_page_fault;
return;
}
bad_page_fault:
bad_page_fault(regs, address, SIGKILL);
return;
}
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
extern void die(const char*, struct pt_regs*, long);
const struct exception_table_entry *entry;
/* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) {
#if 1
printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
current->comm, regs->pc, entry->fixup);
#endif
current->thread.bad_uaddr = address;
regs->pc = entry->fixup;
return;
}
/* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
"address %08lx\n pc = %08lx, ra = %08lx\n",
address, regs->pc, regs->areg[0]);
die("Oops", regs, sig);
do_exit(sig);
}
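The exception-table fixup in bad_page_fault() is what lets the user-access
helpers fail with -EFAULT instead of oopsing when they are handed a bad user
pointer. A minimal sketch of a caller relying on that behaviour (generic
kernel API, nothing xtensa-specific; the function name is made up for
illustration):

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Sketch only: if the load inside copy_from_user() faults, do_page_fault()
 * ends up in bad_page_fault(), which redirects regs->pc to the fixup stub
 * recorded in __ex_table, and copy_from_user() reports the bytes it could
 * not copy instead of the kernel oopsing.
 */
static int read_user_word(const void __user *uaddr, unsigned int *val)
{
	unsigned int tmp;

	if (copy_from_user(&tmp, uaddr, sizeof(tmp)))
		return -EFAULT;

	*val = tmp;
	return 0;
}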
/*
* arch/xtensa/mm/misc.S
*
* Miscellaneous assembly functions.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
/* Note: we might want to implement some of the loops as zero-overhead-loops,
* where applicable and if supported by the processor.
*/
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
/* clear_page (page) */
ENTRY(clear_page)
entry a1, 16
addi a4, a2, PAGE_SIZE
movi a3, 0
1: s32i a3, a2, 0
s32i a3, a2, 4
s32i a3, a2, 8
s32i a3, a2, 12
s32i a3, a2, 16
s32i a3, a2, 20
s32i a3, a2, 24
s32i a3, a2, 28
addi a2, a2, 32
blt a2, a4, 1b
retw
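/* As the note at the top of this file suggests, loops like the one in
 * clear_page could use the Xtensa zero-overhead loop instructions where the
 * configuration provides them (XCHAL_HAVE_LOOPS). A sketch of such a
 * variant, kept disabled because it is illustrative and untested:
 */
#if 0
ENTRY(clear_page_zol)
entry a1, 16
movi a3, 0
movi a4, PAGE_SIZE / 32
loop a4, 1f
s32i a3, a2, 0
s32i a3, a2, 4
s32i a3, a2, 8
s32i a3, a2, 12
s32i a3, a2, 16
s32i a3, a2, 20
s32i a3, a2, 24
s32i a3, a2, 28
addi a2, a2, 32
1: retw
#endif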
/*
* copy_page (void *to, void *from)
* a2 a3
*/
ENTRY(copy_page)
entry a1, 16
addi a4, a2, PAGE_SIZE
1: l32i a5, a3, 0
l32i a6, a3, 4
l32i a7, a3, 8
s32i a5, a2, 0
s32i a6, a2, 4
s32i a7, a2, 8
l32i a5, a3, 12
l32i a6, a3, 16
l32i a7, a3, 20
s32i a5, a2, 12
s32i a6, a2, 16
s32i a7, a2, 20
l32i a5, a3, 24
l32i a6, a3, 28
s32i a5, a2, 24
s32i a6, a2, 28
addi a2, a2, 32
addi a3, a3, 32
blt a2, a4, 1b
retw
/*
* void __flush_invalidate_cache_all(void)
*/
ENTRY(__flush_invalidate_cache_all)
entry sp, 16
dcache_writeback_inv_all a2, a3
icache_invalidate_all a2, a3
retw
/*
* void __invalidate_icache_all(void)
*/
ENTRY(__invalidate_icache_all)
entry sp, 16
icache_invalidate_all a2, a3
retw
/*
* void __flush_invalidate_dcache_all(void)
*/
ENTRY(__flush_invalidate_dcache_all)
entry sp, 16
dcache_writeback_inv_all a2, a3
retw
/*
* void __flush_invalidate_cache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_cache_range)
entry sp, 16
mov a4, a2
mov a5, a3
dcache_writeback_inv_region a4, a5, a6
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_icache_page(ulong start)
*/
ENTRY(__invalidate_icache_page)
entry sp, 16
movi a3, PAGE_SIZE
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_dcache_page(ulong start)
*/
ENTRY(__invalidate_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_icache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_icache_range)
entry sp, 16
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__invalidate_dcache_range)
entry sp, 16
dcache_invalidate_region a2, a3, a4
retw
/*
* void __flush_dcache_page(ulong start)
*/
ENTRY(__flush_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_writeback_region a2, a3, a4
retw
/*
* void __flush_invalidate_dcache_page(ulong start)
*/
ENTRY(__flush_invalidate_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_writeback_inv_region a2, a3, a4
retw
/*
* void __flush_invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_range)
entry sp, 16
dcache_writeback_inv_region a2, a3, a4
retw
/*
* void __invalidate_dcache_all(void)
*/
ENTRY(__invalidate_dcache_all)
entry sp, 16
dcache_invalidate_all a2, a3
retw
/*
* void __flush_invalidate_dcache_page_phys(ulong start)
*/
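/* This walks the data cache by line index (a3, counting down from the top
 * of the cache), reads each line's tag with ldct, masks it to the page
 * frame bits plus the valid bit (a4 = PAGE_MASK | 1), and compares the
 * result against the target physical page in a2 (with the valid bit set).
 * Matching lines are written back and invalidated by index with diwbi.
 */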
ENTRY(__flush_invalidate_dcache_page_phys)
entry sp, 16
movi a3, XCHAL_DCACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: diwbi a3, 0
bgeui a3, 2, 1b
retw
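/* The check_dcache_* entries below appear to be debugging aids: each one
 * scans one quarter of the data cache for a line whose tag matches the
 * given physical page and spins on '2: j 2b' when it finds one, so a hit
 * can be observed with a debugger.
 */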
ENTRY(check_dcache_low0)
entry sp, 16
movi a3, XCHAL_DCACHE_SIZE / 4
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: j 2b
ENTRY(check_dcache_high0)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE / 2
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
ENTRY(check_dcache_low1)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE * 3 / 4
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
ENTRY(check_dcache_high1)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void __invalidate_icache_page_phys(ulong start)
*/
ENTRY(__invalidate_icache_page_phys)
entry sp, 16
movi a3, XCHAL_ICACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_ICACHE_LINESIZE
lict a6, a3
isync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: iii a3, 0
bgeui a3, 2, 1b
retw
#if 0
movi a3, XCHAL_DCACHE_WAYS - 1
movi a4, PAGE_SIZE
1: mov a5, a2
add a6, a2, a4
2: diwbi a5, 0
diwbi a5, XCHAL_DCACHE_LINESIZE
diwbi a5, XCHAL_DCACHE_LINESIZE * 2
diwbi a5, XCHAL_DCACHE_LINESIZE * 3
addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
blt a5, a6, 2b
addi a3, a3, -1
addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
bgez a3, 1b
retw
ENTRY(__invalidate_icache_page_index)
entry sp, 16
movi a3, XCHAL_ICACHE_WAYS - 1
movi a4, PAGE_SIZE
1: mov a5, a2
add a6, a2, a4
2: iii a5, 0
iii a5, XCHAL_ICACHE_LINESIZE
iii a5, XCHAL_ICACHE_LINESIZE * 2
iii a5, XCHAL_ICACHE_LINESIZE * 3
addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
blt a5, a6, 2b
addi a3, a3, -1
addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
bgez a3, 1b
retw
#endif
/*
* arch/xtensa/mm/pgtable.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#if (DCACHE_SIZE > PAGE_SIZE)
pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte = NULL, *p;
int color = ADDR_COLOR(address);
int i;
p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
if (likely(p)) {
struct page *page;
for (i = 0; i < COLOR_SIZE; i++, p += PTRS_PER_PTE) {
page = virt_to_page(p);
set_page_count(page, 1);
ClearPageCompound(page);
if (ADDR_COLOR(p) == color)
pte = p;
else
free_page((unsigned long) p);
}
clear_page(pte);
}
return pte;
}
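/* The coloring helpers used above (COLOR_ORDER, COLOR_SIZE, ADDR_COLOR,
 * PADDR_COLOR) are defined in the architecture headers. A plausible shape,
 * assuming a virtually indexed data cache whose way size is twice the page
 * size, is sketched below for reference only; the real definitions may
 * differ. The idea is to allocate enough naturally ordered pages that one
 * of them is guaranteed to have the cache color of the faulting address,
 * keep that one, and free the rest.
 */
#if 0
#define COLOR_ORDER	1
#define COLOR_SIZE	(1 << COLOR_ORDER)
#define ADDR_COLOR(a)	(((unsigned long)(a) >> PAGE_SHIFT) & (COLOR_SIZE - 1))
#define PADDR_COLOR(a)	ADDR_COLOR(a)
#endif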
#ifdef PROFILING
int mask;
int hit;
int flush;
#endif
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *page = NULL, *p;
int color = ADDR_COLOR(address);
int i;
p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
if (likely(p)) {
for (i = 0; i < COLOR_SIZE; i++, p++) {
set_page_count(p, 1);
ClearPageCompound(p);
if (PADDR_COLOR(page_address(p)) == color)
page = p;
else
__free_page(p);
}
clear_highpage(page);
}
return page;
}
#endif