Commit 6ee127b7 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Nothing much this merge window for sparc.

  1) Fix FPU state management in sparc32, from Tkhai Kirill.

  2) More sparc32 mm layer code cleanups, largely more side effects of
     the sun4c support removal in 3.5, from Sam Ravnborg.

  3) Remove unused code in sparc64, from Bjorn Helgaas and Kirill Tkhai.

  4) Some declaration and comment tidies in PROM support code, from
     Geert Uytterhoeven."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc: (24 commits)
  sparc32, copy_thread: Clear TIF_USEDFPU flag of created task instead of current
  sparc32: delete dead code in show_mem()
  sparc32: move kmap_init() to highmem.c
  sparc32: move probe_memory() to srmmu.c
  sparc32: drop unused BAD_PAGE stuff
  sparc32: centralize all mmu context handling in srmmu.c
  sparc32: drop quicklist
  sparc32: drop sparc model check in paging_init
  sparc32: drop sparc_unmapped_base
  sparc32,leon: drop leon_init()
  sparc32: drop fixmap.h
  sparc32: fixmap.h cleanup
  sparc32: drop unused kmap_atomic_to_page
  sparc32: drop swapper_pg_dir
  sparc32: beautify srmmu_inherit_prom_mappings()
  sparc32: use void * in nocache get/free
  sparc32: fix coding-style in srmmu.c
  sparc32: sort includes in srmmu.c
  sparc32: define a few srmmu functions __init
  sparc64: remove unused function straddles_64bit_va_hole()
  ...
parents 1e30c1b3 427f23cb
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <linux/kernel.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the top of unused virtual memory (0xfd000000 - 1 page) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use set_fixmap(idx, phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
* on UP currently we will have no trace of the fixmap mechanism,
* no page table allocations, etc. This might change in the
* future, say framebuffers for the console driver(s) could be
* fix-mapped?
*/
/*
 * Compile-time allocated fixmap slots.  Index 0 maps to the highest
 * fixmap virtual address (FIXADDR_TOP); successive indices grow
 * downward in the virtual address space via __fix_to_virt().
 */
enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_HIGHMEM
/* One kmap_atomic slot per kmap type per possible CPU. */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
/* Not a real slot: sentinel counting the number of fixed addresses. */
__end_of_fixed_addresses
};
/*
 * Bind a physical address to a fixmap slot with the given page
 * protection.  Defined out of line (architecture mm code).
 */
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
/* Map a fixmap slot with normal cached kernel protections. */
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
/*
* used by vmalloc.c.
*
* Leave one empty page between IO pages at 0xfd000000 and
* the start of the fixmap.
*/
#define FIXADDR_TOP (0xfcfff000UL)
/* Total bytes reserved: one page per fixmap slot. */
#define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT)
/* Lowest fixmap address; the region is [FIXADDR_START, FIXADDR_TOP]. */
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/* Index -> virtual address: slot 0 sits at FIXADDR_TOP, growing down. */
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
/* Virtual address -> index: inverse of __fix_to_virt for page-aligned vaddrs. */
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
/*
 * Intentionally never defined: referencing it with an out-of-range
 * constant index turns into a link-time error (see fix_to_virt()).
 */
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
/*
 * Translate a fixmap index to its virtual address.  Must be called
 * with a compile-time-constant, in-range index: the range check below
 * is folded away by the optimizer for valid constants, and becomes a
 * link error (undefined __this_fixmap_does_not_exist) otherwise.
 */
static inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message..
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
/*
 * Reverse translation: fixmap virtual address back to its slot index.
 * BUG()s at runtime if vaddr lies outside [FIXADDR_START, FIXADDR_TOP).
 */
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/vaddrs.h> #include <asm/vaddrs.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -29,7 +28,6 @@ ...@@ -29,7 +28,6 @@
/* declarations for highmem.c */ /* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn; extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot; extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table; extern pte_t *pkmap_page_table;
...@@ -72,7 +70,6 @@ static inline void kunmap(struct page *page) ...@@ -72,7 +70,6 @@ static inline void kunmap(struct page *page)
extern void *kmap_atomic(struct page *page); extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr); extern void __kunmap_atomic(void *kvaddr);
extern struct page *kmap_atomic_to_page(void *vaddr);
#define flush_cache_kmaps() flush_cache_all() #define flush_cache_kmaps() flush_cache_all()
......
...@@ -82,7 +82,6 @@ static inline unsigned long leon_load_reg(unsigned long paddr) ...@@ -82,7 +82,6 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x)) #define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v)) #define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
extern void leon_init(void);
extern void leon_switch_mm(void); extern void leon_switch_mm(void);
extern void leon_init_IRQ(void); extern void leon_init_IRQ(void);
......
...@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) ...@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{ {
} }
/* /* Initialize a new mmu context. This is invoked when a new
* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated. * address space instance (unique or shared) is instantiated.
*/ */
#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
/* /* Destroy a dead context. This occurs when mmput drops the
* Destroy a dead context. This occurs when mmput drops the
* mm_users count to zero, the mmaps have been released, and * mm_users count to zero, the mmaps have been released, and
* all the page tables have been flushed. Our job is to destroy * all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state. * any remaining processor-specific state.
......
...@@ -107,8 +107,7 @@ typedef unsigned long iopgprot_t; ...@@ -107,8 +107,7 @@ typedef unsigned long iopgprot_t;
typedef struct page *pgtable_t; typedef struct page *pgtable_t;
extern unsigned long sparc_unmapped_base; #define TASK_UNMAPPED_BASE 0x50000000
#define TASK_UNMAPPED_BASE sparc_unmapped_base
#else /* !(__ASSEMBLY__) */ #else /* !(__ASSEMBLY__) */
......
...@@ -11,28 +11,15 @@ ...@@ -11,28 +11,15 @@
struct page; struct page;
extern struct pgtable_cache_struct { void *srmmu_get_nocache(int size, int align);
unsigned long *pgd_cache; void srmmu_free_nocache(void *addr, int size);
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
unsigned long pgd_cache_sz;
} pgt_quicklists;
unsigned long srmmu_get_nocache(int size, int align);
void srmmu_free_nocache(unsigned long vaddr, int size);
#define pgd_quicklist (pgt_quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (pgt_quicklists.pte_cache)
#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
#define check_pgt_cache() do { } while (0) #define check_pgt_cache() do { } while (0)
pgd_t *get_pgd_fast(void); pgd_t *get_pgd_fast(void);
static inline void free_pgd_fast(pgd_t *pgd) static inline void free_pgd_fast(pgd_t *pgd)
{ {
srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE); srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
} }
#define pgd_free(mm, pgd) free_pgd_fast(pgd) #define pgd_free(mm, pgd) free_pgd_fast(pgd)
...@@ -50,13 +37,13 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) ...@@ -50,13 +37,13 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE); SRMMU_PMD_TABLE_SIZE);
} }
static inline void free_pmd_fast(pmd_t * pmd) static inline void free_pmd_fast(pmd_t * pmd)
{ {
srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE); srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
} }
#define pmd_free(mm, pmd) free_pmd_fast(pmd) #define pmd_free(mm, pmd) free_pmd_fast(pmd)
...@@ -73,13 +60,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); ...@@ -73,13 +60,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE); return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
} }
static inline void free_pte_fast(pte_t *pte) static inline void free_pte_fast(pte_t *pte)
{ {
srmmu_free_nocache((unsigned long)pte, PTE_SIZE); srmmu_free_nocache(pte, PTE_SIZE);
} }
#define pte_free_kernel(mm, pte) free_pte_fast(pte) #define pte_free_kernel(mm, pte) free_pte_fast(pte)
......
...@@ -52,8 +52,9 @@ extern unsigned long calc_highpages(void); ...@@ -52,8 +52,9 @@ extern unsigned long calc_highpages(void);
#define PAGE_READONLY SRMMU_PAGE_RDONLY #define PAGE_READONLY SRMMU_PAGE_RDONLY
#define PAGE_KERNEL SRMMU_PAGE_KERNEL #define PAGE_KERNEL SRMMU_PAGE_KERNEL
/* Top-level page directory */ /* Top-level page directory - dummy used by init-mm.
extern pgd_t swapper_pg_dir[1024]; * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL
extern void paging_init(void); extern void paging_init(void);
...@@ -78,8 +79,6 @@ extern unsigned long ptr_in_current_pgd; ...@@ -78,8 +79,6 @@ extern unsigned long ptr_in_current_pgd;
#define __S110 PAGE_SHARED #define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED #define __S111 PAGE_SHARED
extern int num_contexts;
/* First physical page can be anywhere, the following is needed so that /* First physical page can be anywhere, the following is needed so that
* va-->pa and vice versa conversions work properly without performance * va-->pa and vice versa conversions work properly without performance
* hit for all __pa()/__va() operations. * hit for all __pa()/__va() operations.
...@@ -88,18 +87,11 @@ extern unsigned long phys_base; ...@@ -88,18 +87,11 @@ extern unsigned long phys_base;
extern unsigned long pfn_base; extern unsigned long pfn_base;
/* /*
* BAD_PAGETABLE is used when we need a bogus page-table, while
* BAD_PAGE is used for a bogus page.
*
* ZERO_PAGE is a global shared page that is always zero: used * ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc.. * for zero-mapped memory areas etc..
*/ */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page; extern unsigned long empty_zero_page;
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
/* /*
...@@ -398,36 +390,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff) ...@@ -398,36 +390,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff)
*/ */
#define PTE_FILE_MAX_BITS 24 #define PTE_FILE_MAX_BITS 24
/*
*/
struct ctx_list {
struct ctx_list *next;
struct ctx_list *prev;
unsigned int ctx_number;
struct mm_struct *ctx_mm;
};
extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
extern struct ctx_list ctx_free; /* Head of free list */
extern struct ctx_list ctx_used; /* Head of used contexts list */
#define NO_CONTEXT -1
static inline void remove_from_ctx_list(struct ctx_list *entry)
{
entry->next->prev = entry->prev;
entry->prev->next = entry->next;
}
static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
entry->next = head;
(entry->prev = head->prev)->next = entry;
head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
static inline unsigned long static inline unsigned long
__get_phys (unsigned long addr) __get_phys (unsigned long addr)
{ {
......
...@@ -30,6 +30,28 @@ ...@@ -30,6 +30,28 @@
*/ */
#define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */ #define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */
#ifndef __ASSEMBLY__
#include <asm/kmap_types.h>
enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN,
FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS),
#endif
__end_of_fixed_addresses
};
#endif
/* Leave one empty page between IO pages at 0xfd000000 and
* the top of the fixmap.
*/
#define FIXADDR_TOP (0xfcfff000UL)
#define FIXADDR_SIZE ((FIX_KMAP_END + 1) << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */ #define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */
#define IOBASE_VADDR 0xfe000000 #define IOBASE_VADDR 0xfe000000
#define IOBASE_END 0xfe600000 #define IOBASE_END 0xfe600000
......
...@@ -58,8 +58,6 @@ sun4e_notsup: ...@@ -58,8 +58,6 @@ sun4e_notsup:
/* This was the only reasonable way I could think of to properly align /* This was the only reasonable way I could think of to properly align
* these page-table data structures. * these page-table data structures.
*/ */
.globl swapper_pg_dir
swapper_pg_dir: .skip PAGE_SIZE
.globl empty_zero_page .globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE empty_zero_page: .skip PAGE_SIZE
......
...@@ -486,17 +486,6 @@ void __init leon_trans_init(struct device_node *dp) ...@@ -486,17 +486,6 @@ void __init leon_trans_init(struct device_node *dp)
} }
} }
void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0;
void __init leon_node_init(struct device_node *dp, struct device_node ***nextp)
{
if (prom_amba_init &&
strcmp(dp->type, "ambapp") == 0 &&
strcmp(dp->name, "ambapp0") == 0) {
prom_amba_init(dp, nextp);
}
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
void leon_clear_profile_irq(int cpu) void leon_clear_profile_irq(int cpu)
{ {
...@@ -522,8 +511,3 @@ void __init leon_init_IRQ(void) ...@@ -522,8 +511,3 @@ void __init leon_init_IRQ(void)
sparc_config.clear_clock_irq = leon_clear_clock_irq; sparc_config.clear_clock_irq = leon_clear_clock_irq;
sparc_config.load_profile_irq = leon_load_profile_irq; sparc_config.load_profile_irq = leon_load_profile_irq;
} }
void __init leon_init(void)
{
of_pdt_build_more = &leon_node_init;
}
...@@ -333,9 +333,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, ...@@ -333,9 +333,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
put_psr(get_psr() | PSR_EF); put_psr(get_psr() | PSR_EF);
fpsave(&p->thread.float_regs[0], &p->thread.fsr, fpsave(&p->thread.float_regs[0], &p->thread.fsr,
&p->thread.fpqueue[0], &p->thread.fpqdepth); &p->thread.fpqueue[0], &p->thread.fpqdepth);
#ifdef CONFIG_SMP
clear_thread_flag(TIF_USEDFPU);
#endif
} }
/* /*
...@@ -413,6 +410,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, ...@@ -413,6 +410,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* FPU must be disabled on SMP. */ /* FPU must be disabled on SMP. */
childregs->psr &= ~PSR_EF; childregs->psr &= ~PSR_EF;
clear_tsk_thread_flag(p, TIF_USEDFPU);
#endif #endif
/* Set the return value for the child. */ /* Set the return value for the child. */
......
...@@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p)
(*(linux_dbvec->teach_debugger))(); (*(linux_dbvec->teach_debugger))();
} }
init_mm.context = (unsigned long) NO_CONTEXT;
init_task.thread.kregs = &fake_swapper_regs; init_task.thread.kregs = &fake_swapper_regs;
/* Run-time patch instructions to match the cpu model */ /* Run-time patch instructions to match the cpu model */
......
...@@ -66,23 +66,6 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len) ...@@ -66,23 +66,6 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
return 0; return 0;
} }
/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
unsigned long va_exclude_start, va_exclude_end;
va_exclude_start = VA_EXCLUDE_START;
va_exclude_end = VA_EXCLUDE_END;
if (likely(start < va_exclude_start && end < va_exclude_start))
return 0;
if (likely(start >= va_exclude_end && end >= va_exclude_end))
return 0;
return 1;
}
/* These functions differ from the default implementations in /* These functions differ from the default implementations in
* mm/mmap.c in two ways: * mm/mmap.c in two ways:
* *
......
...@@ -90,49 +90,49 @@ ...@@ -90,49 +90,49 @@
faligndata %x7, %x8, %f14; faligndata %x7, %x8, %f14;
#define FREG_MOVE_1(x0) \ #define FREG_MOVE_1(x0) \
fmovd %x0, %f0; fsrc2 %x0, %f0;
#define FREG_MOVE_2(x0, x1) \ #define FREG_MOVE_2(x0, x1) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; fsrc2 %x1, %f2;
#define FREG_MOVE_3(x0, x1, x2) \ #define FREG_MOVE_3(x0, x1, x2) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; \ fsrc2 %x1, %f2; \
fmovd %x2, %f4; fsrc2 %x2, %f4;
#define FREG_MOVE_4(x0, x1, x2, x3) \ #define FREG_MOVE_4(x0, x1, x2, x3) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; \ fsrc2 %x1, %f2; \
fmovd %x2, %f4; \ fsrc2 %x2, %f4; \
fmovd %x3, %f6; fsrc2 %x3, %f6;
#define FREG_MOVE_5(x0, x1, x2, x3, x4) \ #define FREG_MOVE_5(x0, x1, x2, x3, x4) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; \ fsrc2 %x1, %f2; \
fmovd %x2, %f4; \ fsrc2 %x2, %f4; \
fmovd %x3, %f6; \ fsrc2 %x3, %f6; \
fmovd %x4, %f8; fsrc2 %x4, %f8;
#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \ #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; \ fsrc2 %x1, %f2; \
fmovd %x2, %f4; \ fsrc2 %x2, %f4; \
fmovd %x3, %f6; \ fsrc2 %x3, %f6; \
fmovd %x4, %f8; \ fsrc2 %x4, %f8; \
fmovd %x5, %f10; fsrc2 %x5, %f10;
#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \ #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; \ fsrc2 %x1, %f2; \
fmovd %x2, %f4; \ fsrc2 %x2, %f4; \
fmovd %x3, %f6; \ fsrc2 %x3, %f6; \
fmovd %x4, %f8; \ fsrc2 %x4, %f8; \
fmovd %x5, %f10; \ fsrc2 %x5, %f10; \
fmovd %x6, %f12; fsrc2 %x6, %f12;
#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \ #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
fmovd %x0, %f0; \ fsrc2 %x0, %f0; \
fmovd %x1, %f2; \ fsrc2 %x1, %f2; \
fmovd %x2, %f4; \ fsrc2 %x2, %f4; \
fmovd %x3, %f6; \ fsrc2 %x3, %f6; \
fmovd %x4, %f8; \ fsrc2 %x4, %f8; \
fmovd %x5, %f10; \ fsrc2 %x5, %f10; \
fmovd %x6, %f12; \ fsrc2 %x6, %f12; \
fmovd %x7, %f14; fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \ #define FREG_LOAD_1(base, x0) \
EX_LD(LOAD(ldd, base + 0x00, %x0)) EX_LD(LOAD(ldd, base + 0x00, %x0))
#define FREG_LOAD_2(base, x0, x1) \ #define FREG_LOAD_2(base, x0, x1) \
......
...@@ -109,7 +109,7 @@ ...@@ -109,7 +109,7 @@
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ #define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
subcc %left, 8, %left; \ subcc %left, 8, %left; \
bl,pn %xcc, 95f; \ bl,pn %xcc, 95f; \
fsrc1 %f0, %f1; fsrc2 %f0, %f1;
#define UNEVEN_VISCHUNK(dest, f0, f1, left) \ #define UNEVEN_VISCHUNK(dest, f0, f1, left) \
UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
...@@ -201,7 +201,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ...@@ -201,7 +201,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o1, (0x40 - 1), %o1 andn %o1, (0x40 - 1), %o1
and %g2, 7, %g2 and %g2, 7, %g2
andncc %g3, 0x7, %g3 andncc %g3, 0x7, %g3
fmovd %f0, %f2 fsrc2 %f0, %f2
sub %g3, 0x8, %g3 sub %g3, 0x8, %g3
sub %o2, %GLOBAL_SPARE, %o2 sub %o2, %GLOBAL_SPARE, %o2
......
...@@ -34,10 +34,10 @@ ...@@ -34,10 +34,10 @@
#endif #endif
#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \ #define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
fmovd %reg0, %f48; fmovd %reg1, %f50; \ fsrc2 %reg0, %f48; fsrc2 %reg1, %f50; \
fmovd %reg2, %f52; fmovd %reg3, %f54; \ fsrc2 %reg2, %f52; fsrc2 %reg3, %f54; \
fmovd %reg4, %f56; fmovd %reg5, %f58; \ fsrc2 %reg4, %f56; fsrc2 %reg5, %f58; \
fmovd %reg6, %f60; fmovd %reg7, %f62; fsrc2 %reg6, %f60; fsrc2 %reg7, %f62;
.text .text
...@@ -104,60 +104,60 @@ cheetah_copy_page_insn: ...@@ -104,60 +104,60 @@ cheetah_copy_page_insn:
prefetch [%o1 + 0x140], #one_read prefetch [%o1 + 0x140], #one_read
ldd [%o1 + 0x010], %f4 ldd [%o1 + 0x010], %f4
prefetch [%o1 + 0x180], #one_read prefetch [%o1 + 0x180], #one_read
fmovd %f0, %f16 fsrc2 %f0, %f16
ldd [%o1 + 0x018], %f6 ldd [%o1 + 0x018], %f6
fmovd %f2, %f18 fsrc2 %f2, %f18
ldd [%o1 + 0x020], %f8 ldd [%o1 + 0x020], %f8
fmovd %f4, %f20 fsrc2 %f4, %f20
ldd [%o1 + 0x028], %f10 ldd [%o1 + 0x028], %f10
fmovd %f6, %f22 fsrc2 %f6, %f22
ldd [%o1 + 0x030], %f12 ldd [%o1 + 0x030], %f12
fmovd %f8, %f24 fsrc2 %f8, %f24
ldd [%o1 + 0x038], %f14 ldd [%o1 + 0x038], %f14
fmovd %f10, %f26 fsrc2 %f10, %f26
ldd [%o1 + 0x040], %f0 ldd [%o1 + 0x040], %f0
1: ldd [%o1 + 0x048], %f2 1: ldd [%o1 + 0x048], %f2
fmovd %f12, %f28 fsrc2 %f12, %f28
ldd [%o1 + 0x050], %f4 ldd [%o1 + 0x050], %f4
fmovd %f14, %f30 fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P stda %f16, [%o0] ASI_BLK_P
ldd [%o1 + 0x058], %f6 ldd [%o1 + 0x058], %f6
fmovd %f0, %f16 fsrc2 %f0, %f16
ldd [%o1 + 0x060], %f8 ldd [%o1 + 0x060], %f8
fmovd %f2, %f18 fsrc2 %f2, %f18
ldd [%o1 + 0x068], %f10 ldd [%o1 + 0x068], %f10
fmovd %f4, %f20 fsrc2 %f4, %f20
ldd [%o1 + 0x070], %f12 ldd [%o1 + 0x070], %f12
fmovd %f6, %f22 fsrc2 %f6, %f22
ldd [%o1 + 0x078], %f14 ldd [%o1 + 0x078], %f14
fmovd %f8, %f24 fsrc2 %f8, %f24
ldd [%o1 + 0x080], %f0 ldd [%o1 + 0x080], %f0
prefetch [%o1 + 0x180], #one_read prefetch [%o1 + 0x180], #one_read
fmovd %f10, %f26 fsrc2 %f10, %f26
subcc %o2, 1, %o2 subcc %o2, 1, %o2
add %o0, 0x40, %o0 add %o0, 0x40, %o0
bne,pt %xcc, 1b bne,pt %xcc, 1b
add %o1, 0x40, %o1 add %o1, 0x40, %o1
ldd [%o1 + 0x048], %f2 ldd [%o1 + 0x048], %f2
fmovd %f12, %f28 fsrc2 %f12, %f28
ldd [%o1 + 0x050], %f4 ldd [%o1 + 0x050], %f4
fmovd %f14, %f30 fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P stda %f16, [%o0] ASI_BLK_P
ldd [%o1 + 0x058], %f6 ldd [%o1 + 0x058], %f6
fmovd %f0, %f16 fsrc2 %f0, %f16
ldd [%o1 + 0x060], %f8 ldd [%o1 + 0x060], %f8
fmovd %f2, %f18 fsrc2 %f2, %f18
ldd [%o1 + 0x068], %f10 ldd [%o1 + 0x068], %f10
fmovd %f4, %f20 fsrc2 %f4, %f20
ldd [%o1 + 0x070], %f12 ldd [%o1 + 0x070], %f12
fmovd %f6, %f22 fsrc2 %f6, %f22
add %o0, 0x40, %o0 add %o0, 0x40, %o0
ldd [%o1 + 0x078], %f14 ldd [%o1 + 0x078], %f14
fmovd %f8, %f24 fsrc2 %f8, %f24
fmovd %f10, %f26 fsrc2 %f10, %f26
fmovd %f12, %f28 fsrc2 %f12, %f28
fmovd %f14, %f30 fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P stda %f16, [%o0] ASI_BLK_P
membar #Sync membar #Sync
VISExitHalf VISExitHalf
......
...@@ -32,24 +32,6 @@ ...@@ -32,24 +32,6 @@
int show_unhandled_signals = 1; int show_unhandled_signals = 1;
/* At boot time we determine these two values necessary for setting
* up the segment maps and page table entries (pte's).
*/
int num_contexts;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
unsigned long total = 0;
int i;
for (i = 0; sp_banks[i].num_bytes; i++)
total += sp_banks[i].num_bytes;
return total;
}
static void unhandled_fault(unsigned long, struct task_struct *, static void unhandled_fault(unsigned long, struct task_struct *,
struct pt_regs *) __attribute__ ((noreturn)); struct pt_regs *) __attribute__ ((noreturn));
......
...@@ -22,13 +22,31 @@ ...@@ -22,13 +22,31 @@
* shared by CPUs, and so precious, and establishing them requires IPI. * shared by CPUs, and so precious, and establishing them requires IPI.
* Atomic kmaps are lightweight and we may have NCPUS more of them. * Atomic kmaps are lightweight and we may have NCPUS more of them.
*/ */
#include <linux/mm.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/export.h> #include <linux/export.h>
#include <asm/pgalloc.h> #include <linux/mm.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/fixmap.h> #include <asm/pgalloc.h>
#include <asm/vaddrs.h>
pgprot_t kmap_prot;
static pte_t *kmap_pte;
void __init kmap_init(void)
{
unsigned long address;
pmd_t *dir;
address = __fix_to_virt(FIX_KMAP_BEGIN);
dir = pmd_offset(pgd_offset_k(address), address);
/* cache the first kmap pte */
kmap_pte = pte_offset_kernel(dir, address);
kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
void *kmap_atomic(struct page *page) void *kmap_atomic(struct page *page)
{ {
...@@ -110,21 +128,3 @@ void __kunmap_atomic(void *kvaddr) ...@@ -110,21 +128,3 @@ void __kunmap_atomic(void *kvaddr)
pagefault_enable(); pagefault_enable();
} }
EXPORT_SYMBOL(__kunmap_atomic); EXPORT_SYMBOL(__kunmap_atomic);
/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
pte_t *pte;
if (vaddr < SRMMU_NOCACHE_VADDR)
return virt_to_page(ptr);
if (vaddr < PKMAP_BASE)
return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
BUG_ON(vaddr < FIXADDR_START);
BUG_ON(vaddr > FIXADDR_TOP);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
...@@ -45,9 +45,6 @@ unsigned long pfn_base; ...@@ -45,9 +45,6 @@ unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base); EXPORT_SYMBOL(pfn_base);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
unsigned long sparc_unmapped_base;
struct pgtable_cache_struct pgt_quicklists;
/* Initial ramdisk setup */ /* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_image;
...@@ -55,19 +52,6 @@ extern unsigned int sparc_ramdisk_size; ...@@ -55,19 +52,6 @@ extern unsigned int sparc_ramdisk_size;
unsigned long highstart_pfn, highend_pfn; unsigned long highstart_pfn, highend_pfn;
pte_t *kmap_pte;
pgprot_t kmap_prot;
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
void __init kmap_init(void)
{
/* cache the first kmap pte */
kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
void show_mem(unsigned int filter) void show_mem(unsigned int filter)
{ {
printk("Mem-info:\n"); printk("Mem-info:\n");
...@@ -76,33 +60,8 @@ void show_mem(unsigned int filter) ...@@ -76,33 +60,8 @@ void show_mem(unsigned int filter)
nr_swap_pages << (PAGE_SHIFT-10)); nr_swap_pages << (PAGE_SHIFT-10));
printk("%ld pages of RAM\n", totalram_pages); printk("%ld pages of RAM\n", totalram_pages);
printk("%ld free pages\n", nr_free_pages()); printk("%ld free pages\n", nr_free_pages());
#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
printk("%ld pages in page table cache\n",pgtable_cache_size);
#ifndef CONFIG_SMP
if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
printk("%ld entries in page dir cache\n",pgd_cache_size);
#endif
#endif
} }
void __init sparc_context_init(int numctx)
{
int ctx;
ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);
for(ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
clist = (ctx_list_pool + ctx);
clist->ctx_number = ctx;
clist->ctx_mm = NULL;
}
ctx_free.next = ctx_free.prev = &ctx_free;
ctx_used.next = ctx_used.prev = &ctx_used;
for(ctx = 0; ctx < numctx; ctx++)
add_to_free_ctxlist(ctx_list_pool + ctx);
}
extern unsigned long cmdline_memory_size; extern unsigned long cmdline_memory_size;
unsigned long last_valid_pfn; unsigned long last_valid_pfn;
...@@ -292,22 +251,7 @@ extern void device_scan(void); ...@@ -292,22 +251,7 @@ extern void device_scan(void);
void __init paging_init(void) void __init paging_init(void)
{ {
switch(sparc_cpu_model) {
case sparc_leon:
leon_init();
/* fall through */
case sun4m:
case sun4d:
srmmu_paging_init(); srmmu_paging_init();
sparc_unmapped_base = 0x50000000;
break;
default:
prom_printf("paging_init: Cannot init paging on this Sparc\n");
prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
prom_printf("paging_init: Halting...\n");
prom_halt();
}
prom_build_devicetree(); prom_build_devicetree();
of_fill_in_cpu_data(); of_fill_in_cpu_data();
device_scan(); device_scan();
......
...@@ -8,45 +8,45 @@ ...@@ -8,45 +8,45 @@
* Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org) * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
*/ */
#include <linux/kernel.h> #include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/fs.h> #include <linux/pagemap.h>
#include <linux/seq_file.h> #include <linux/vmalloc.h>
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/bitext.h> #include <asm/mmu_context.h>
#include <asm/page.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/io.h> #include <asm/bitext.h>
#include <asm/vaddrs.h> #include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/msi.h> #include <asm/msi.h>
#include <asm/mmu_context.h> #include <asm/smp.h>
#include <asm/io-unit.h> #include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* Now the cpu specific definitions. */ /* Now the cpu specific definitions. */
#include <asm/viking.h> #include <asm/turbosparc.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h> #include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h> #include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h> #include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include "srmmu.h" #include "srmmu.h"
...@@ -55,10 +55,6 @@ static unsigned int hwbug_bitmask; ...@@ -55,10 +55,6 @@ static unsigned int hwbug_bitmask;
int vac_cache_size; int vac_cache_size;
int vac_line_size; int vac_line_size;
struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;
extern struct resource sparc_iomap; extern struct resource sparc_iomap;
extern unsigned long last_valid_pfn; extern unsigned long last_valid_pfn;
...@@ -137,7 +133,7 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) ...@@ -137,7 +133,7 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
} }
/* Find an entry in the third-level page table.. */ /* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address) pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{ {
void *pte; void *pte;
...@@ -151,17 +147,20 @@ pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address) ...@@ -151,17 +147,20 @@ pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
* align: bytes, number to align at. * align: bytes, number to align at.
* Returns the virtual address of the allocated area. * Returns the virtual address of the allocated area.
*/ */
static unsigned long __srmmu_get_nocache(int size, int align) static void *__srmmu_get_nocache(int size, int align)
{ {
int offset; int offset;
unsigned long addr;
if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
printk("Size 0x%x too small for nocache request\n", size); printk(KERN_ERR "Size 0x%x too small for nocache request\n",
size);
size = SRMMU_NOCACHE_BITMAP_SHIFT; size = SRMMU_NOCACHE_BITMAP_SHIFT;
} }
if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) { if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
printk("Size 0x%x unaligned int nocache request\n", size); printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",
size += SRMMU_NOCACHE_BITMAP_SHIFT-1; size);
size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
} }
BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
...@@ -169,37 +168,40 @@ static unsigned long __srmmu_get_nocache(int size, int align) ...@@ -169,37 +168,40 @@ static unsigned long __srmmu_get_nocache(int size, int align)
size >> SRMMU_NOCACHE_BITMAP_SHIFT, size >> SRMMU_NOCACHE_BITMAP_SHIFT,
align >> SRMMU_NOCACHE_BITMAP_SHIFT); align >> SRMMU_NOCACHE_BITMAP_SHIFT);
if (offset == -1) { if (offset == -1) {
printk("srmmu: out of nocache %d: %d/%d\n", printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
size, (int) srmmu_nocache_size, size, (int) srmmu_nocache_size,
srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
return 0; return 0;
} }
return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT)); addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
return (void *)addr;
} }
unsigned long srmmu_get_nocache(int size, int align) void *srmmu_get_nocache(int size, int align)
{ {
unsigned long tmp; void *tmp;
tmp = __srmmu_get_nocache(size, align); tmp = __srmmu_get_nocache(size, align);
if (tmp) if (tmp)
memset((void *)tmp, 0, size); memset(tmp, 0, size);
return tmp; return tmp;
} }
void srmmu_free_nocache(unsigned long vaddr, int size) void srmmu_free_nocache(void *addr, int size)
{ {
unsigned long vaddr;
int offset; int offset;
vaddr = (unsigned long)addr;
if (vaddr < SRMMU_NOCACHE_VADDR) { if (vaddr < SRMMU_NOCACHE_VADDR) {
printk("Vaddr %lx is smaller than nocache base 0x%lx\n", printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
BUG(); BUG();
} }
if (vaddr+size > srmmu_nocache_end) { if (vaddr + size > srmmu_nocache_end) {
printk("Vaddr %lx is bigger than nocache end 0x%lx\n", printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
vaddr, srmmu_nocache_end); vaddr, srmmu_nocache_end);
BUG(); BUG();
...@@ -212,7 +214,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size) ...@@ -212,7 +214,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
printk("Size 0x%x is too small\n", size); printk("Size 0x%x is too small\n", size);
BUG(); BUG();
} }
if (vaddr & (size-1)) { if (vaddr & (size - 1)) {
printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
BUG(); BUG();
} }
...@@ -226,13 +228,23 @@ void srmmu_free_nocache(unsigned long vaddr, int size) ...@@ -226,13 +228,23 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
static void srmmu_early_allocate_ptable_skeleton(unsigned long start, static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end); unsigned long end);
extern unsigned long probe_memory(void); /* in fault.c */ /* Return how much physical memory we have. */
static unsigned long __init probe_memory(void)
{
unsigned long total = 0;
int i;
for (i = 0; sp_banks[i].num_bytes; i++)
total += sp_banks[i].num_bytes;
return total;
}
/* /*
* Reserve nocache dynamically proportionally to the amount of * Reserve nocache dynamically proportionally to the amount of
* system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
*/ */
static void srmmu_nocache_calcsize(void) static void __init srmmu_nocache_calcsize(void)
{ {
unsigned long sysmemavail = probe_memory() / 1024; unsigned long sysmemavail = probe_memory() / 1024;
int srmmu_nocache_npages; int srmmu_nocache_npages;
...@@ -271,7 +283,7 @@ static void __init srmmu_nocache_init(void) ...@@ -271,7 +283,7 @@ static void __init srmmu_nocache_init(void)
srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
init_mm.pgd = srmmu_swapper_pg_dir; init_mm.pgd = srmmu_swapper_pg_dir;
...@@ -304,7 +316,7 @@ pgd_t *get_pgd_fast(void) ...@@ -304,7 +316,7 @@ pgd_t *get_pgd_fast(void)
{ {
pgd_t *pgd = NULL; pgd_t *pgd = NULL;
pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
if (pgd) { if (pgd) {
pgd_t *init = pgd_offset_k(0); pgd_t *init = pgd_offset_k(0);
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
...@@ -330,7 +342,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -330,7 +342,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
return NULL; return NULL;
page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT ); page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
pgtable_page_ctor(page); pgtable_page_ctor(page);
return page; return page;
} }
...@@ -344,18 +356,50 @@ void pte_free(struct mm_struct *mm, pgtable_t pte) ...@@ -344,18 +356,50 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
if (p == 0) if (p == 0)
BUG(); BUG();
p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */ p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
p = (unsigned long) __nocache_va(p); /* Nocached virtual */
srmmu_free_nocache(p, PTE_SIZE); /* free non cached virtual address*/
srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
} }
/* /* context handling - a dynamically sized pool is used */
*/ #define NO_CONTEXT -1
struct ctx_list {
struct ctx_list *next;
struct ctx_list *prev;
unsigned int ctx_number;
struct mm_struct *ctx_mm;
};
static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;
/* At boot time we determine the number of contexts */
static int num_contexts;
static inline void remove_from_ctx_list(struct ctx_list *entry)
{
entry->next->prev = entry->prev;
entry->prev->next = entry->next;
}
static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
entry->next = head;
(entry->prev = head->prev)->next = entry;
head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{ {
struct ctx_list *ctxp; struct ctx_list *ctxp;
ctxp = ctx_free.next; ctxp = ctx_free.next;
if(ctxp != &ctx_free) { if (ctxp != &ctx_free) {
remove_from_ctx_list(ctxp); remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp); add_to_used_ctxlist(ctxp);
mm->context = ctxp->ctx_number; mm->context = ctxp->ctx_number;
...@@ -363,9 +407,9 @@ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) ...@@ -363,9 +407,9 @@ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
return; return;
} }
ctxp = ctx_used.next; ctxp = ctx_used.next;
if(ctxp->ctx_mm == old_mm) if (ctxp->ctx_mm == old_mm)
ctxp = ctxp->next; ctxp = ctxp->next;
if(ctxp == &ctx_used) if (ctxp == &ctx_used)
panic("out of mmu contexts"); panic("out of mmu contexts");
flush_cache_mm(ctxp->ctx_mm); flush_cache_mm(ctxp->ctx_mm);
flush_tlb_mm(ctxp->ctx_mm); flush_tlb_mm(ctxp->ctx_mm);
...@@ -385,11 +429,31 @@ static inline void free_context(int context) ...@@ -385,11 +429,31 @@ static inline void free_context(int context)
add_to_free_ctxlist(ctx_old); add_to_free_ctxlist(ctx_old);
} }
static void __init sparc_context_init(int numctx)
{
int ctx;
unsigned long size;
size = numctx * sizeof(struct ctx_list);
ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
clist = (ctx_list_pool + ctx);
clist->ctx_number = ctx;
clist->ctx_mm = NULL;
}
ctx_free.next = ctx_free.prev = &ctx_free;
ctx_used.next = ctx_used.prev = &ctx_used;
for (ctx = 0; ctx < numctx; ctx++)
add_to_free_ctxlist(ctx_list_pool + ctx);
}
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk) struct task_struct *tsk)
{ {
if(mm->context == NO_CONTEXT) { if (mm->context == NO_CONTEXT) {
spin_lock(&srmmu_context_spinlock); spin_lock(&srmmu_context_spinlock);
alloc_context(old_mm, mm); alloc_context(old_mm, mm);
spin_unlock(&srmmu_context_spinlock); spin_unlock(&srmmu_context_spinlock);
...@@ -420,8 +484,7 @@ static inline void srmmu_mapioaddr(unsigned long physaddr, ...@@ -420,8 +484,7 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
ptep = pte_offset_kernel(pmdp, virt_addr); ptep = pte_offset_kernel(pmdp, virt_addr);
tmp = (physaddr >> 4) | SRMMU_ET_PTE; tmp = (physaddr >> 4) | SRMMU_ET_PTE;
/* /* I need to test whether this is consistent over all
* I need to test whether this is consistent over all
* sun4m's. The bus_type represents the upper 4 bits of * sun4m's. The bus_type represents the upper 4 bits of
* 36-bit physical address on the I/O space lines... * 36-bit physical address on the I/O space lines...
*/ */
...@@ -591,10 +654,10 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, ...@@ -591,10 +654,10 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
while(start < end) { while (start < end) {
pgdp = pgd_offset_k(start); pgdp = pgd_offset_k(start);
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
pmdp = (pmd_t *) __srmmu_get_nocache( pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL) if (pmdp == NULL)
early_pgtable_allocfail("pmd"); early_pgtable_allocfail("pmd");
...@@ -602,8 +665,8 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, ...@@ -602,8 +665,8 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pgd_set(__nocache_fix(pgdp), pmdp); pgd_set(__nocache_fix(pgdp), pmdp);
} }
pmdp = pmd_offset(__nocache_fix(pgdp), start); pmdp = pmd_offset(__nocache_fix(pgdp), start);
if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE); ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL) if (ptep == NULL)
early_pgtable_allocfail("pte"); early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, PTE_SIZE); memset(__nocache_fix(ptep), 0, PTE_SIZE);
...@@ -622,18 +685,18 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start, ...@@ -622,18 +685,18 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
while(start < end) { while (start < end) {
pgdp = pgd_offset_k(start); pgdp = pgd_offset_k(start);
if (pgd_none(*pgdp)) { if (pgd_none(*pgdp)) {
pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL) if (pmdp == NULL)
early_pgtable_allocfail("pmd"); early_pgtable_allocfail("pmd");
memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(pgdp, pmdp); pgd_set(pgdp, pmdp);
} }
pmdp = pmd_offset(pgdp, start); pmdp = pmd_offset(pgdp, start);
if(srmmu_pmd_none(*pmdp)) { if (srmmu_pmd_none(*pmdp)) {
ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, ptep = __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE); PTE_SIZE);
if (ptep == NULL) if (ptep == NULL)
early_pgtable_allocfail("pte"); early_pgtable_allocfail("pte");
...@@ -671,72 +734,76 @@ static inline unsigned long srmmu_probe(unsigned long vaddr) ...@@ -671,72 +734,76 @@ static inline unsigned long srmmu_probe(unsigned long vaddr)
static void __init srmmu_inherit_prom_mappings(unsigned long start, static void __init srmmu_inherit_prom_mappings(unsigned long start,
unsigned long end) unsigned long end)
{ {
unsigned long probed;
unsigned long addr;
pgd_t *pgdp; pgd_t *pgdp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
unsigned long prompte;
while(start <= end) { while (start <= end) {
if (start == 0) if (start == 0)
break; /* probably wrap around */ break; /* probably wrap around */
if(start == 0xfef00000) if (start == 0xfef00000)
start = KADB_DEBUGGER_BEGVM; start = KADB_DEBUGGER_BEGVM;
if(!(prompte = srmmu_probe(start))) { probed = srmmu_probe(start);
if (!probed) {
/* continue probing until we find an entry */
start += PAGE_SIZE; start += PAGE_SIZE;
continue; continue;
} }
/* A red snapper, see what it really is. */ /* A red snapper, see what it really is. */
what = 0; what = 0;
addr = start - PAGE_SIZE;
if(!(start & ~(SRMMU_REAL_PMD_MASK))) { if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte) if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
what = 1; what = 1;
} }
if(!(start & ~(SRMMU_PGDIR_MASK))) { if (!(start & ~(SRMMU_PGDIR_MASK))) {
if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) == if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
prompte)
what = 2; what = 2;
} }
pgdp = pgd_offset_k(start); pgdp = pgd_offset_k(start);
if(what == 2) { if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte); *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
start += SRMMU_PGDIR_SIZE; start += SRMMU_PGDIR_SIZE;
continue; continue;
} }
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL) if (pmdp == NULL)
early_pgtable_allocfail("pmd"); early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(__nocache_fix(pgdp), pmdp); pgd_set(__nocache_fix(pgdp), pmdp);
} }
pmdp = pmd_offset(__nocache_fix(pgdp), start); pmdp = pmd_offset(__nocache_fix(pgdp), start);
if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE, ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
PTE_SIZE);
if (ptep == NULL) if (ptep == NULL)
early_pgtable_allocfail("pte"); early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, PTE_SIZE); memset(__nocache_fix(ptep), 0, PTE_SIZE);
pmd_set(__nocache_fix(pmdp), ptep); pmd_set(__nocache_fix(pmdp), ptep);
} }
if(what == 1) { if (what == 1) {
/* /* We bend the rule where all 16 PTPs in a pmd_t point
* We bend the rule where all 16 PTPs in a pmd_t point
* inside the same PTE page, and we leak a perfectly * inside the same PTE page, and we leak a perfectly
* good hardware PTE piece. Alternatives seem worse. * good hardware PTE piece. Alternatives seem worse.
*/ */
unsigned int x; /* Index of HW PMD in soft cluster */ unsigned int x; /* Index of HW PMD in soft cluster */
unsigned long *val;
x = (start >> PMD_SHIFT) & 15; x = (start >> PMD_SHIFT) & 15;
*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte; val = &pmdp->pmdv[x];
*(unsigned long *)__nocache_fix(val) = probed;
start += SRMMU_REAL_PMD_SIZE; start += SRMMU_REAL_PMD_SIZE;
continue; continue;
} }
ptep = pte_offset_kernel(__nocache_fix(pmdp), start); ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
*(pte_t *)__nocache_fix(ptep) = __pte(prompte); *(pte_t *)__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE; start += PAGE_SIZE;
} }
} }
...@@ -769,14 +836,14 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) ...@@ -769,14 +836,14 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
if (vend > max_vaddr || vend < min_vaddr) if (vend > max_vaddr || vend < min_vaddr)
vend = max_vaddr; vend = max_vaddr;
while(vstart < vend) { while (vstart < vend) {
do_large_mapping(vstart, pstart); do_large_mapping(vstart, pstart);
vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
} }
return vstart; return vstart;
} }
static inline void map_kernel(void) static void __init map_kernel(void)
{ {
int i; int i;
...@@ -789,9 +856,6 @@ static inline void map_kernel(void) ...@@ -789,9 +856,6 @@ static inline void map_kernel(void)
} }
} }
/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);
void (*poke_srmmu)(void) __cpuinitdata = NULL; void (*poke_srmmu)(void) __cpuinitdata = NULL;
extern unsigned long bootmem_init(unsigned long *pages_avail); extern unsigned long bootmem_init(unsigned long *pages_avail);
...@@ -806,6 +870,7 @@ void __init srmmu_paging_init(void) ...@@ -806,6 +870,7 @@ void __init srmmu_paging_init(void)
pte_t *pte; pte_t *pte;
unsigned long pages_avail; unsigned long pages_avail;
init_mm.context = (unsigned long) NO_CONTEXT;
sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
if (sparc_cpu_model == sun4d) if (sparc_cpu_model == sun4d)
...@@ -814,9 +879,9 @@ void __init srmmu_paging_init(void) ...@@ -814,9 +879,9 @@ void __init srmmu_paging_init(void)
/* Find the number of contexts on the srmmu. */ /* Find the number of contexts on the srmmu. */
cpunode = prom_getchild(prom_root_node); cpunode = prom_getchild(prom_root_node);
num_contexts = 0; num_contexts = 0;
while(cpunode != 0) { while (cpunode != 0) {
prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
if(!strcmp(node_str, "cpu")) { if (!strcmp(node_str, "cpu")) {
num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
break; break;
} }
...@@ -824,7 +889,7 @@ void __init srmmu_paging_init(void) ...@@ -824,7 +889,7 @@ void __init srmmu_paging_init(void)
} }
} }
if(!num_contexts) { if (!num_contexts) {
prom_printf("Something wrong, can't find cpu node in paging_init.\n"); prom_printf("Something wrong, can't find cpu node in paging_init.\n");
prom_halt(); prom_halt();
} }
...@@ -834,14 +899,14 @@ void __init srmmu_paging_init(void) ...@@ -834,14 +899,14 @@ void __init srmmu_paging_init(void)
srmmu_nocache_calcsize(); srmmu_nocache_calcsize();
srmmu_nocache_init(); srmmu_nocache_init();
srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE)); srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
map_kernel(); map_kernel();
/* ctx table has to be physically aligned to its size */ /* ctx table has to be physically aligned to its size */
srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t)); srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table); srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
for(i = 0; i < num_contexts; i++) for (i = 0; i < num_contexts; i++)
srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
flush_cache_all(); flush_cache_all();
...@@ -908,10 +973,16 @@ void mmu_info(struct seq_file *m) ...@@ -908,10 +973,16 @@ void mmu_info(struct seq_file *m)
srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
} }
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = NO_CONTEXT;
return 0;
}
void destroy_context(struct mm_struct *mm) void destroy_context(struct mm_struct *mm)
{ {
if(mm->context != NO_CONTEXT) { if (mm->context != NO_CONTEXT) {
flush_cache_mm(mm); flush_cache_mm(mm);
srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
flush_tlb_mm(mm); flush_tlb_mm(mm);
...@@ -941,13 +1012,12 @@ static void __init init_vac_layout(void) ...@@ -941,13 +1012,12 @@ static void __init init_vac_layout(void)
#endif #endif
nd = prom_getchild(prom_root_node); nd = prom_getchild(prom_root_node);
while((nd = prom_getsibling(nd)) != 0) { while ((nd = prom_getsibling(nd)) != 0) {
prom_getstring(nd, "device_type", node_str, sizeof(node_str)); prom_getstring(nd, "device_type", node_str, sizeof(node_str));
if(!strcmp(node_str, "cpu")) { if (!strcmp(node_str, "cpu")) {
vac_line_size = prom_getint(nd, "cache-line-size"); vac_line_size = prom_getint(nd, "cache-line-size");
if (vac_line_size == -1) { if (vac_line_size == -1) {
prom_printf("can't determine cache-line-size, " prom_printf("can't determine cache-line-size, halting.\n");
"halting.\n");
prom_halt(); prom_halt();
} }
cache_lines = prom_getint(nd, "cache-nlines"); cache_lines = prom_getint(nd, "cache-nlines");
...@@ -958,9 +1028,9 @@ static void __init init_vac_layout(void) ...@@ -958,9 +1028,9 @@ static void __init init_vac_layout(void)
vac_cache_size = cache_lines * vac_line_size; vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if(vac_cache_size > max_size) if (vac_cache_size > max_size)
max_size = vac_cache_size; max_size = vac_cache_size;
if(vac_line_size < min_line_size) if (vac_line_size < min_line_size)
min_line_size = vac_line_size; min_line_size = vac_line_size;
//FIXME: cpus not contiguous!! //FIXME: cpus not contiguous!!
cpu++; cpu++;
...@@ -971,7 +1041,7 @@ static void __init init_vac_layout(void) ...@@ -971,7 +1041,7 @@ static void __init init_vac_layout(void)
#endif #endif
} }
} }
if(nd == 0) { if (nd == 0) {
prom_printf("No CPU nodes found, halting.\n"); prom_printf("No CPU nodes found, halting.\n");
prom_halt(); prom_halt();
} }
...@@ -1082,7 +1152,7 @@ static void __init init_swift(void) ...@@ -1082,7 +1152,7 @@ static void __init init_swift(void)
"=r" (swift_rev) : "=r" (swift_rev) :
"r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
srmmu_name = "Fujitsu Swift"; srmmu_name = "Fujitsu Swift";
switch(swift_rev) { switch (swift_rev) {
case 0x11: case 0x11:
case 0x20: case 0x20:
case 0x23: case 0x23:
...@@ -1222,7 +1292,8 @@ static void __cpuinit poke_turbosparc(void) ...@@ -1222,7 +1292,8 @@ static void __cpuinit poke_turbosparc(void)
/* Clear any crap from the cache or else... */ /* Clear any crap from the cache or else... */
turbosparc_flush_cache_all(); turbosparc_flush_cache_all();
mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */ /* Temporarily disable I & D caches */
mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */ mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
srmmu_set_mmureg(mreg); srmmu_set_mmureg(mreg);
...@@ -1248,7 +1319,7 @@ static void __cpuinit poke_turbosparc(void) ...@@ -1248,7 +1319,7 @@ static void __cpuinit poke_turbosparc(void)
default: default:
ccreg |= (TURBOSPARC_SCENABLE); ccreg |= (TURBOSPARC_SCENABLE);
} }
turbosparc_set_ccreg (ccreg); turbosparc_set_ccreg(ccreg);
mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */ mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */ mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
...@@ -1342,7 +1413,7 @@ static void __cpuinit poke_viking(void) ...@@ -1342,7 +1413,7 @@ static void __cpuinit poke_viking(void)
unsigned long bpreg; unsigned long bpreg;
mreg &= ~(VIKING_TCENABLE); mreg &= ~(VIKING_TCENABLE);
if(smp_catch++) { if (smp_catch++) {
/* Must disable mixed-cmd mode here for other cpu's. */ /* Must disable mixed-cmd mode here for other cpu's. */
bpreg = viking_get_bpreg(); bpreg = viking_get_bpreg();
bpreg &= ~(VIKING_ACTION_MIX); bpreg &= ~(VIKING_ACTION_MIX);
...@@ -1411,7 +1482,7 @@ static void __init init_viking(void) ...@@ -1411,7 +1482,7 @@ static void __init init_viking(void)
unsigned long mreg = srmmu_get_mmureg(); unsigned long mreg = srmmu_get_mmureg();
/* Ahhh, the viking. SRMMU VLSI abortion number two... */ /* Ahhh, the viking. SRMMU VLSI abortion number two... */
if(mreg & VIKING_MMODE) { if (mreg & VIKING_MMODE) {
srmmu_name = "TI Viking"; srmmu_name = "TI Viking";
viking_mxcc_present = 0; viking_mxcc_present = 0;
msi_set_sync(); msi_set_sync();
...@@ -1467,8 +1538,8 @@ static void __init get_srmmu_type(void) ...@@ -1467,8 +1538,8 @@ static void __init get_srmmu_type(void)
} }
/* Second, check for HyperSparc or Cypress. */ /* Second, check for HyperSparc or Cypress. */
if(mod_typ == 1) { if (mod_typ == 1) {
switch(mod_rev) { switch (mod_rev) {
case 7: case 7:
/* UP or MP Hypersparc */ /* UP or MP Hypersparc */
init_hypersparc(); init_hypersparc();
...@@ -1489,8 +1560,7 @@ static void __init get_srmmu_type(void) ...@@ -1489,8 +1560,7 @@ static void __init get_srmmu_type(void)
return; return;
} }
/* /* Now Fujitsu TurboSparc. It might happen that it is
* Now Fujitsu TurboSparc. It might happen that it is
* in Swift emulation mode, so we will check later... * in Swift emulation mode, so we will check later...
*/ */
if (psr_typ == 0 && psr_vers == 5) { if (psr_typ == 0 && psr_vers == 5) {
...@@ -1499,15 +1569,15 @@ static void __init get_srmmu_type(void) ...@@ -1499,15 +1569,15 @@ static void __init get_srmmu_type(void)
} }
/* Next check for Fujitsu Swift. */ /* Next check for Fujitsu Swift. */
if(psr_typ == 0 && psr_vers == 4) { if (psr_typ == 0 && psr_vers == 4) {
phandle cpunode; phandle cpunode;
char node_str[128]; char node_str[128];
/* Look if it is not a TurboSparc emulating Swift... */ /* Look if it is not a TurboSparc emulating Swift... */
cpunode = prom_getchild(prom_root_node); cpunode = prom_getchild(prom_root_node);
while((cpunode = prom_getsibling(cpunode)) != 0) { while ((cpunode = prom_getsibling(cpunode)) != 0) {
prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
if(!strcmp(node_str, "cpu")) { if (!strcmp(node_str, "cpu")) {
if (!prom_getintdefault(cpunode, "psr-implementation", 1) && if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
prom_getintdefault(cpunode, "psr-version", 1) == 5) { prom_getintdefault(cpunode, "psr-version", 1) == 5) {
init_turbosparc(); init_turbosparc();
...@@ -1522,7 +1592,7 @@ static void __init get_srmmu_type(void) ...@@ -1522,7 +1592,7 @@ static void __init get_srmmu_type(void)
} }
/* Now the Viking family of srmmu. */ /* Now the Viking family of srmmu. */
if(psr_typ == 4 && if (psr_typ == 4 &&
((psr_vers == 0) || ((psr_vers == 0) ||
((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
init_viking(); init_viking();
...@@ -1530,7 +1600,7 @@ static void __init get_srmmu_type(void) ...@@ -1530,7 +1600,7 @@ static void __init get_srmmu_type(void)
} }
/* Finally the Tsunami. */ /* Finally the Tsunami. */
if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
init_tsunami(); init_tsunami();
return; return;
} }
......
...@@ -27,13 +27,10 @@ EXPORT_SYMBOL(prom_root_node); ...@@ -27,13 +27,10 @@ EXPORT_SYMBOL(prom_root_node);
struct linux_nodeops *prom_nodeops; struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the /* You must call prom_init() before you attempt to use any of the
* routines in the prom library. It returns 0 on success, 1 on * routines in the prom library.
* failure. It gets passed the pointer to the PROM vector. * It gets passed the pointer to the PROM vector.
*/ */
extern void prom_meminit(void);
extern void prom_ranges_init(void);
void __init prom_init(struct linux_romvec *rp) void __init prom_init(struct linux_romvec *rp)
{ {
romvec = rp; romvec = rp;
......
...@@ -22,8 +22,8 @@ int prom_stdout; ...@@ -22,8 +22,8 @@ int prom_stdout;
phandle prom_chosen_node; phandle prom_chosen_node;
/* You must call prom_init() before you attempt to use any of the /* You must call prom_init() before you attempt to use any of the
* routines in the prom library. It returns 0 on success, 1 on * routines in the prom library.
* failure. It gets passed the pointer to the PROM vector. * It gets passed the pointer to the PROM vector.
*/ */
extern void prom_cif_init(void *, void *); extern void prom_cif_init(void *, void *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment