Commit 6b3efc2a, authored Oct 29, 2002 by Matthew Wilcox, committed by Linus Torvalds on Oct 29, 2002.
[PATCH] arch/parisc/mm

Update arch/parisc/mm

Parent commit: 1e0b058c

Showing 7 changed files, with 998 additions and 618 deletions (+998 -618).
arch/parisc/mm/Makefile     +1    -1
arch/parisc/mm/extable.c    +2    -2
arch/parisc/mm/fault.c      +42   -58
arch/parisc/mm/init.c       +775  -215
arch/parisc/mm/ioremap.c    +178  -0
arch/parisc/mm/pa11.c       +0    -171
arch/parisc/mm/pa20.c       +0    -171
arch/parisc/mm/Makefile

@@ -2,6 +2,6 @@
 # Makefile for the linux parisc-specific parts of the memory manager.
 #

-objs-y	:= init.o fault.o kmap.o extable.o
+obj-y	:= init.o fault.o extable.o ioremap.o

 include $(TOPDIR)/Rules.make
arch/parisc/mm/extable.c

@@ -46,17 +46,17 @@ search_one_table (const struct exception_table_entry *first,

 const struct exception_table_entry *
 search_exception_table (unsigned long addr)
 {
-#ifndef CONFIG_MODULE
+#ifndef CONFIG_MODULES
 	/* There is only the kernel to search.  */
 	return search_one_table(__start___ex_table,
 				__stop___ex_table - 1, addr);
 #else
-	struct exception_table_entry *ret;
 	/* The kernel is the last "module" -- no need to treat it special. */
 	struct module *mp;

 	for (mp = module_list; mp; mp = mp->next) {
+		const struct exception_table_entry *ret;
 		if (!mp->ex_table_start)
 			continue;
 		ret = search_one_table(mp->ex_table_start,
 				       mp->ex_table_end - 1, ...
 ...
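The search above walks each module's exception table with search_one_table(), which relies on the table being sorted by faulting instruction address. As a quick illustration of that idea, here is a self-contained user-space sketch of a binary search over such a table; the struct and values are simplified stand-ins, not the kernel's types:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for exception_table_entry: a faulting
 * instruction address paired with the address of its fixup code. */
struct ex_entry {
    unsigned long insn;
    unsigned long fixup;
};

/* Binary search over a table sorted by insn; NULL if addr is not covered. */
static const struct ex_entry *
search_one_table(const struct ex_entry *first, const struct ex_entry *last,
                 unsigned long addr)
{
    while (first <= last) {
        const struct ex_entry *mid = first + (last - first) / 2;

        if (mid->insn == addr)
            return mid;
        if (mid->insn < addr)
            first = mid + 1;
        else
            last = mid - 1;
    }
    return NULL;
}

int main(void)
{
    static const struct ex_entry table[] = {
        { 0x1000, 0x9000 }, { 0x1040, 0x9010 }, { 0x2000, 0x9020 },
    };
    const struct ex_entry *e = search_one_table(table, table + 2, 0x1040);

    printf("fixup = %#lx\n", e ? e->fixup : 0UL);
    return 0;
}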
arch/parisc/mm/fault.c

@@ -17,6 +17,10 @@
 #include <linux/interrupt.h>

 #include <asm/uaccess.h>
 #include <asm/traps.h>

+#define PRINT_USER_FAULTS	/* (turn this on if you want user faults to be */
+				/*  dumped to the console via printk) */
+
 /* Defines for parisc_acctyp() */
 ...
@@ -114,59 +118,31 @@ parisc_acctyp(unsigned long code, unsigned int inst)
 #undef isGraphicsFlushRead
 #undef BITSSET

-/* This is similar to expand_stack(), except that it is for stacks
- * that grow upwards.
- */
-static inline int expand_stackup(struct vm_area_struct *vma,
-		unsigned long address)
-{
-	unsigned long grow;
-
-	address += 4 + PAGE_SIZE - 1;
-	address &= PAGE_MASK;
-	grow = (address - vma->vm_end) >> PAGE_SHIFT;
-	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur
-	    || ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
-			current->rlim[RLIMIT_AS].rlim_cur)
-		return -ENOMEM;
-	vma->vm_end = address;
-	vma->vm_mm->total_vm += grow;
-	if (vma->vm_flags & VM_LOCKED)
-		vma->vm_mm->locked_vm += grow;
-	return 0;
-}
-
-/* This is similar to find_vma(), except that it understands that stacks
- * grow up rather than down.
- * XXX Optimise by making use of cache and avl tree as per find_vma().
- */
-struct vm_area_struct *pa_find_vma(struct mm_struct *mm, unsigned long addr)
-{
-	struct vm_area_struct *vma = NULL;
-
-	if (mm) {
-		vma = mm->mmap;
-		if (!vma || addr < vma->vm_start)
-			return NULL;
-		while (vma->vm_next && addr >= vma->vm_next->vm_start)
-			vma = vma->vm_next;
-	}
-	return vma;
-}
+#if 0
+/* This is the treewalk to find a vma which is the highest that has
+ * a start < addr.  We're using find_vma_prev instead right now, but
+ * we might want to use this at some point in the future.  Probably
+ * not, but I want it committed to CVS so I don't lose it :-)
+ */
+	while (tree != vm_avl_empty) {
+		if (tree->vm_start > addr) {
+			tree = tree->vm_avl_left;
+		} else {
+			prev = tree;
+			if (prev->vm_next == NULL)
+				break;
+			if (prev->vm_next->vm_start > addr)
+				break;
+			tree = tree->vm_avl_right;
+		}
+	}
+#endif

 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */

 extern void parisc_terminate(char *, struct pt_regs *, int, unsigned long);

 void do_page_fault(struct pt_regs *regs, unsigned long code,
 			      unsigned long address)
 {
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma, *prev_vma;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 	const struct exception_table_entry *fix;
 ...
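The removed expand_stackup() grew an upward-growing stack VMA to cover a faulting address by rounding the address up to a page boundary and charging the new pages against the rlimits. A minimal user-space sketch of that rounding arithmetic, with a simplified toy vma type (not the kernel's struct):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct toy_vma { unsigned long vm_start, vm_end; };

/* Round the faulting address up to the next page boundary and return
 * how many new pages the vma must grow by (0 if already covered). */
static unsigned long pages_to_grow(const struct toy_vma *vma, unsigned long addr)
{
    addr += 4 + PAGE_SIZE - 1;   /* same rounding the old code used */
    addr &= PAGE_MASK;
    if (addr <= vma->vm_end)
        return 0;
    return (addr - vma->vm_end) >> PAGE_SHIFT;
}

int main(void)
{
    struct toy_vma stack = { 0x10000, 0x12000 };  /* two pages mapped */

    printf("fault at 0x12fff grows stack by %lu page(s)\n",
           pages_to_grow(&stack, 0x12fff));
    return 0;
}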
@@ -176,13 +152,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 		goto no_context;

 	down_read(&mm->mmap_sem);
-	vma = pa_find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (address < vma->vm_end)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSUP) || expand_stackup(vma, address))
-		goto bad_area;
+	vma = find_vma_prev(mm, address, &prev_vma);
+	if (!vma || address < vma->vm_start)
+		goto check_expansion;
 /*
  * Ok, we have a good vm_area for this memory access. We still need to
  * check the access permissions.
 ...
@@ -221,6 +193,11 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	up_read(&mm->mmap_sem);
 	return;

+check_expansion:
+	vma = prev_vma;
+	if (vma && (expand_stack(vma, address) == 0))
+		goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  */
 ...
@@ -230,9 +207,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	if (user_mode(regs)) {
 		struct siginfo si;

-		printk("\ndo_page_fault() pid=%d command='%s'\n",
-		    tsk->pid, tsk->comm);
+#ifdef PRINT_USER_FAULTS
+		printk(KERN_DEBUG "\n");
+		printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
+		    tsk->pid, tsk->comm, code, address);
+		if (vma) {
+			printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+					vma->vm_start, vma->vm_end);
+		}
+		show_regs(regs);
+#endif
 		/* FIXME: actually we need to get the signo and code correct */
 		si.si_signo = SIGSEGV;
 		si.si_errno = 0;
 ...
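The diagnostics above are gated by the PRINT_USER_FAULTS define added at the top of the file, so a kernel built without it pays nothing for them. A small stand-alone sketch of the same compile-time gating pattern (the macro name is reused for illustration; the struct and fields are made up):

#include <stdio.h>

#define PRINT_USER_FAULTS        /* comment this out to silence the log */

struct fault_info { int pid; const char *comm; unsigned long addr; };

static void report_fault(const struct fault_info *f)
{
#ifdef PRINT_USER_FAULTS
    /* Only compiled in when the define above is present. */
    fprintf(stderr, "fault: pid=%d command='%s' address=0x%08lx\n",
            f->pid, f->comm, f->addr);
#else
    (void)f;                     /* keep the parameter "used" when logging is off */
#endif
}

int main(void)
{
    struct fault_info f = { 1234, "example", 0xdeadb000UL };
    report_fault(&f);
    return 0;
}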
@@ -272,11 +256,11 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 		}
 	}

-	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
+	parisc_terminate("Bad Address (null pointer deref?)",
+			 regs, code, address);

   out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk("VM: killing process %s\n", current->comm);
+	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
 	if (user_mode(regs))
 		do_exit(SIGKILL);
 	goto no_context;
 ...
arch/parisc/mm/init.c

@@ -15,14 +15,406 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
 #include <linux/blk.h>		/* for initrd_start and initrd_end */
 #include <linux/swap.h>
 #include <linux/unistd.h>

 #include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/pdc_chassis.h>

-extern unsigned long max_pfn, mem_max;
+mmu_gather_t mmu_gathers[NR_CPUS];

-void free_initmem(void)  {
-}
+extern char _text;	/* start of kernel code, defined by linker */
+extern int  data_start;
+extern char _end;	/* end of BSS, defined by linker */
+extern char __init_begin, __init_end;
+
+#ifdef CONFIG_DISCONTIGMEM
+struct node_map_data node_data[MAX_PHYSMEM_RANGES];
+bootmem_data_t bmem_data[MAX_PHYSMEM_RANGES];
+unsigned char *chunkmap;
+unsigned int maxchunkmap;
+#endif
+
+static struct resource data_resource = {
+	name:	"Kernel data",
+	flags:	IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource code_resource = {
+	name:	"Kernel code",
+	flags:	IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource pdcdata_resource = {
+	name:	"PDC data (Page Zero)",
+	start:	0,
+	end:	0x9ff,
+	flags:	IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES];
+
+static unsigned long max_pfn;
+
+/* The following array is initialized from the firmware specific
+ * information retrieved in kernel/inventory.c.
+ */
+
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
+int npmem_ranges;
+
+#ifdef __LP64__
+#define MAX_MEM         (~0UL)
+#else /* !__LP64__ */
+#define MAX_MEM         (3584U*1024U*1024U)
+#endif /* !__LP64__ */
+
+static unsigned long mem_limit = MAX_MEM;
+
+static void __init mem_limit_func(void)
+{
+	char *cp, *end;
+	unsigned long limit;
+	extern char saved_command_line[];
+
+	/* We need this before __setup() functions are called */
+
+	limit = MAX_MEM;
+	for (cp = saved_command_line; *cp; ) {
+		if (memcmp(cp, "mem=", 4) == 0) {
+			cp += 4;
+			limit = memparse(cp, &end);
+			if (end != cp)
+				break;
+			cp = end;
+		} else {
+			while (*cp != ' ' && *cp)
+				++cp;
+			while (*cp == ' ')
+				++cp;
+		}
+	}
+
+	if (limit < mem_limit)
+		mem_limit = limit;
+}
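mem_limit_func() scans the saved kernel command line for a "mem=" option and, through memparse(), accepts a size with an optional K/M/G suffix. A self-contained sketch of the same parsing loop, with a simplified stand-in for memparse (names here are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the kernel's memparse(): a number with an
 * optional K/M/G suffix, returned in bytes. */
static unsigned long parse_size(const char *cp, char **endp)
{
    unsigned long val = strtoul(cp, endp, 0);

    switch (**endp) {
    case 'G': case 'g': val <<= 10; /* fall through */
    case 'M': case 'm': val <<= 10; /* fall through */
    case 'K': case 'k': val <<= 10; (*endp)++;
    }
    return val;
}

/* Scan a space-separated command line for "mem=" the way mem_limit_func()
 * does, returning the requested limit or ~0UL if none was given. */
static unsigned long find_mem_limit(const char *cmdline)
{
    const char *cp = cmdline;
    unsigned long limit = ~0UL;

    while (*cp) {
        if (memcmp(cp, "mem=", 4) == 0) {
            char *end;
            limit = parse_size(cp + 4, &end);
            break;
        }
        while (*cp != ' ' && *cp) ++cp;   /* skip this option */
        while (*cp == ' ') ++cp;          /* skip separators  */
    }
    return limit;
}

int main(void)
{
    printf("limit = %lu bytes\n", find_mem_limit("console=ttyS0 mem=512M ro"));
    return 0;
}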
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

#ifdef CONFIG_DISCONTIGMEM
	/*
	 * The below is still true as of 2.4.2. If this is ever fixed,
	 * we can remove this warning!
	 */
	printk(KERN_WARNING "\n\n");
	printk(KERN_WARNING "CONFIG_DISCONTIGMEM is enabled, which is probably a mistake. This\n");
	printk(KERN_WARNING "option can lead to heavy swapping, even when there are gigabytes\n");
	printk(KERN_WARNING "of free memory.\n\n");
#endif

#ifdef __LP64__

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP). If CONFIG_DISCONTIGMEM wasn't implemented so
	 * poorly, we would recommend enabling that option, but,
	 * until it is fixed, this is the best way to go.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld Mb\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

#endif /* __LP64__ */

#if 1
	/* KLUGE! this really belongs in kernel/resource.c! */
	iomem_resource.end = ~0UL;
#endif
	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld Mb\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						     - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld Mb\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < npmem_ranges; i++)
		node_data[i].pg_data.bdata = &bmem_data[i];
#endif
	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i), bootmap_pfn,
						 start_pfn, (start_pfn + npages));
		free_bootmem_node(NODE_DATA(i), (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT));
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
		(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
		(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
		((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	/* reserve the holes */
	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end) - 1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
	/* FIXME: */
#if 0
	printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
			(&__init_end - &__init_begin) >> 10);
	return;
#else
	unsigned long addr;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#if 1
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(&__init_begin, 0x00,
		(unsigned long)&__init_end - (unsigned long)&__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
#endif
}

 /* ...
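In the non-DISCONTIGMEM case, setup_bootmem() collapses the sorted physical ranges into a single range and remembers the gaps so they can be reserved from the bootmem allocator later. A self-contained sketch of that merge step, using a toy range type and made-up values:

#include <stdio.h>

struct range { unsigned long start_pfn, pages; };

/* Merge sorted, non-overlapping ranges into ranges[0] and record the gaps,
 * mirroring the "merge the ranges, keeping track of the holes" block above. */
static int merge_ranges(struct range *ranges, int n, struct range *holes)
{
    unsigned long end_pfn = ranges[0].start_pfn + ranges[0].pages;
    int nholes = 0;

    for (int i = 1; i < n; i++) {
        unsigned long hole_pages = ranges[i].start_pfn - end_pfn;

        if (hole_pages) {
            holes[nholes].start_pfn = end_pfn;
            holes[nholes++].pages = hole_pages;
            end_pfn += hole_pages;
        }
        end_pfn += ranges[i].pages;
    }
    ranges[0].pages = end_pfn - ranges[0].start_pfn;
    return nholes;               /* ranges[] now effectively has one entry */
}

int main(void)
{
    struct range r[3] = { {0, 1024}, {2048, 1024}, {4096, 512} };
    struct range holes[2];
    int nholes = merge_ranges(r, 3, holes);

    printf("merged: %lu pages, %d hole(s), first hole at pfn %lu\n",
           r[0].pages, nholes, holes[0].start_pfn);
    return 0;
}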
@@ -36,67 +428,43 @@ void free_initmem(void) {
  * a hole of 4kB between each vmalloced area for the same reason.
  */

 #define MAP_START 0x4000	/* Leave room for gateway page expansion */
 #define VM_MAP_OFFSET  (32*1024)
 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

 void *vmalloc_start;
 #ifdef CONFIG_PA11
 unsigned long pcxl_dma_start;
 #endif

 void __init mem_init(void)
 {
-	max_mapnr = num_physpages = max_low_pfn;
-	high_memory = __va(max_low_pfn * PAGE_SIZE);
+	int i;

-	totalram_pages += free_all_bootmem();
-	printk("Memory: %luk available\n", totalram_pages << (PAGE_SHIFT - 10));
+	high_memory = __va((max_pfn << PAGE_SHIFT));
+
+	max_mapnr = (virt_to_page(high_memory - 1) - mem_map) + 1;
+	num_physpages = 0;
+	mem_map = zone_table[0]->zone_mem_map;
+	for (i = 0; i < npmem_ranges; i++)
+		num_physpages += free_all_bootmem_node(NODE_DATA(i));
+	totalram_pages = num_physpages;
+
+	printk(KERN_INFO "Memory: %luk available\n",
+		num_physpages << (PAGE_SHIFT - 10));

 #ifdef CONFIG_PA11
 	if (hppa_dma_ops == &pcxl_dma_ops) {
-		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(high_memory);
+		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
 		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
 	} else {
 		pcxl_dma_start = 0;
-		vmalloc_start = SET_MAP_OFFSET(high_memory);
+		vmalloc_start = SET_MAP_OFFSET(MAP_START);
 	}
+#else
+	vmalloc_start = SET_MAP_OFFSET(MAP_START);
+#endif
 }

-void __bad_pgd(pgd_t *pgd)
-{
-	printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
-	pgd_val(*pgd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
-}
-
-void __bad_pmd(pmd_t *pmd)
-{
-	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
-	pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
-}
-
-pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
-{
-	pte_t *pte;
-
-	pte = (pte_t *) __get_free_page(GFP_KERNEL);
-	if (pmd_none(*pmd)) {
-		if (pte) {
-			clear_page(pte);
-			pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)pte);
-			return pte + offset;
-		}
-		pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
-		return NULL;
-	}
-	free_page((unsigned long)pte);
-	if (pmd_bad(*pmd)) {
-		__bad_pmd(pmd);
-		return NULL;
-	}
-	return (pte_t *) pmd_page(*pmd) + offset;
-}

 int do_check_pgt_cache(int low, int high)
 ...
@@ -104,40 +472,16 @@ int do_check_pgt_cache(int low, int high)
 	return 0;
 }

-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-pte_t *__bad_pagetable(void)
-{
-	return (pte_t *) NULL;
-}
-
 unsigned long *empty_zero_page;
-unsigned long *empty_bad_page;
-
-pte_t __bad_page(void)
-{
-	return *(pte_t *) NULL;
-}

 void show_mem(void)
 {
 	int i, free = 0, total = 0, reserved = 0;
 	int shared = 0, cached = 0;

-	printk("Mem-info:\n");
+	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-	printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
+	printk(KERN_INFO "Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
 	i = max_mapnr;
 	while (i-- > 0) {
 		total++;
 ...
@@ -150,53 +494,49 @@ void show_mem(void)
 		else
 			shared += atomic_read(&mem_map[i].count) - 1;
 	}
-	printk("%d pages of RAM\n", total);
-	printk("%d reserved pages\n", reserved);
-	printk("%d pages shared\n", shared);
-	printk("%d pages swap cached\n", cached);
+	printk(KERN_INFO "%d pages of RAM\n", total);
+	printk(KERN_INFO "%d reserved pages\n", reserved);
+	printk(KERN_INFO "%d pages shared\n", shared);
+	printk(KERN_INFO "%d pages swap cached\n", cached);
 }

 void set_pte_phys(unsigned long vaddr, unsigned long phys)
 {
 }

-/*
- * pagetable_init() sets up the page tables
- *
- * Note that gateway_init() places the Linux gateway page at page 0.
- * Since gateway pages cannot be dereferenced this has the desirable
- * side effect of trapping those pesky NULL-reference errors in the
- * kernel.
- */
-static void __init pagetable_init(void)
+static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
+			     unsigned long size, pgprot_t pgprot)
 {
 	pgd_t *pg_dir;
 	pmd_t *pmd;
 	pte_t *pg_table;
+	unsigned long end_paddr;
+	unsigned long start_pmd;
+	unsigned long start_pte;
 	unsigned long tmp1;
 	unsigned long tmp2;
 	unsigned long address;
 	unsigned long ro_start;
 	unsigned long ro_end;
 	unsigned long fv_addr;
-	extern const int stext;
+	unsigned long gw_addr;
 	extern int data_start;
 	extern const unsigned long fault_vector_20;
+	extern void * const linux_gateway_page;

-	ro_start = __pa((unsigned long)&stext);
+	ro_start = __pa((unsigned long)&_text);
 	ro_end   = __pa((unsigned long)&data_start);
 	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
+	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

-	printk("pagetable_init\n");
+	end_paddr = start_paddr + size;

-	/* Map whole memory from PAGE_OFFSET */
-	pg_dir = (pgd_t *)swapper_pg_dir + USER_PGD_PTRS;
+	pg_dir = pgd_offset_k(start_vaddr);

-	address = 0;
-	while (address < mem_max) {
-		/* XXX: BTLB should be done here */
+#if PTRS_PER_PMD == 1
+	start_pmd = 0;
+#else
+	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

+	address = start_paddr;
+	while (address < end_paddr) {
 #if PTRS_PER_PMD == 1
 		pmd = (pmd_t *)__pa(pg_dir);
 #else
 ...
@@ -207,7 +547,7 @@ static void __init pagetable_init(void)
 		 */

 		if (!pmd) {
-			pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
 			pmd = (pmd_t *) __pa(pmd);
 		}
 ...
@@ -217,8 +557,8 @@ static void __init pagetable_init(void)
 		/* now change pmd to kernel virtual addresses */

-		pmd = (pmd_t *) __va(pmd);
-		for (tmp1 = 0; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+		pmd = (pmd_t *) __va(pmd) + start_pmd;
+		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

 			/*
 			 * pg_table is physical at this point
 ...
@@ -227,7 +567,7 @@ static void __init pagetable_init(void)
 			pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
 			if (!pg_table) {
 				pg_table = (pte_t *)
-					alloc_bootmem_low_pages(PAGE_SIZE);
+					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
 				pg_table = (pte_t *) __pa(pg_table);
 			}
 ...
@@ -236,64 +576,90 @@ static void __init pagetable_init(void)
 			/* now change pg_table to kernel virtual addresses */

-			pg_table = (pte_t *) __va(pg_table);
-			for (tmp2 = 0; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;

-#if !defined(CONFIG_KWDB) && !defined(CONFIG_STI_CONSOLE)
+#if !defined(CONFIG_STI_CONSOLE)
 #warning STI console should explicitly allocate executable pages but does not
-/* KWDB needs to write kernel text when setting break points.
-**
-** The right thing to do seems like KWDB modify only the pte which
-** has a break point on it...otherwise we might mask worse bugs.
-*/
+				/*
+				 * Map the fault vector writable so we can
+				 * write the HPMC checksum.
+				 */
 				if (address >= ro_start && address < ro_end
-							&& address != fv_addr)
+							&& address != fv_addr
+							&& address != gw_addr)
 					pte = __mk_pte(address, PAGE_KERNEL_RO);
 				else
 #endif
-					pte = __mk_pte(address, PAGE_KERNEL);
+					pte = __mk_pte(address, pgprot);

-				if (address >= mem_max)
+				if (address >= end_paddr)
 					pte_val(pte) = 0;

 				set_pte(pg_table, pte);

 				address += PAGE_SIZE;
 			}
+			start_pte = 0;

-			if (address >= mem_max)
+			if (address >= end_paddr)
 				break;
 		}
+		start_pmd = 0;
 	}
 }

+/*
+ * pagetable_init() sets up the page tables
+ *
+ * Note that gateway_init() places the Linux gateway page at page 0.
+ * Since gateway pages cannot be dereferenced this has the desirable
+ * side effect of trapping those pesky NULL-reference errors in the
+ * kernel.
+ */
+static void __init pagetable_init(void)
+{
+	int range;
+
+	printk("pagetable_init\n");
+
+	/* Map each physical memory range to its kernel vaddr */
+
+	for (range = 0; range < npmem_ranges; range++) {
+		unsigned long start_paddr;
+		unsigned long end_paddr;
+		unsigned long size;
+
+		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
+		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
+		size = pmem_ranges[range].pages << PAGE_SHIFT;
+
+		map_pages((unsigned long)__va(start_paddr), start_paddr,
+			size, PAGE_KERNEL);
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_end && initrd_end > mem_limit) {
+		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
+		map_pages(initrd_start, __pa(initrd_start),
+			initrd_end - initrd_start, PAGE_KERNEL);
+	}
+#endif
+
+	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	memset(empty_zero_page, 0, PAGE_SIZE);
+}

-unsigned long gateway_pgd_offset;
-unsigned long gateway_pgd_entry;
-
 static void __init gateway_init(void)
 {
-	unsigned long hpux_gateway_page_addr;
+	unsigned long linux_gateway_page_addr;
-	pgd_t *pg_dir;
-	pmd_t *pmd_base;
-	pmd_t *pmd;
-	pte_t *pg_table_base;
-	pte_t *pg_table;
-	/* FIXME: These are 'const' in order to trick the compiler
-	   into not treating them as DP-relative data. */
-	extern void * const hpux_gateway_page;
+	/* FIXME: This is 'const' in order to trick the compiler
+	   into not treating it as DP-relative data. */
 	extern void * const linux_gateway_page;
-	pte_t pte;

-	hpux_gateway_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;
+	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

-	gateway_pgd_offset = hpux_gateway_page_addr >> PGDIR_SHIFT;

 	/*
 	 * Setup Linux Gateway page.
 	 *
 ...
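map_pages() starts its walk at the pmd and pte slots that correspond to the first virtual address, computed by shifting and masking (start_pmd, start_pte), and resets them to zero for the following directory entries. A stand-alone sketch of that index arithmetic with example shift values (these constants are illustrative, not the real parisc configuration):

#include <stdio.h>

/* Illustrative 32-bit, two-level layout: 4 kB pages, 1024 PTEs per page
 * table, 1024 PMD entries per directory. */
#define PAGE_SHIFT    12
#define PTRS_PER_PTE  1024
#define PMD_SHIFT     (PAGE_SHIFT + 10)   /* each pmd entry covers 4 MB */
#define PTRS_PER_PMD  1024

int main(void)
{
    unsigned long vaddr = 0x40123456UL;

    unsigned long pmd_index = (vaddr >> PMD_SHIFT)  & (PTRS_PER_PMD - 1);
    unsigned long pte_index = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    unsigned long page_off  = vaddr & ((1UL << PAGE_SHIFT) - 1);

    /* These are the start_pmd / start_pte offsets map_pages() begins at. */
    printf("pmd=%lu pte=%lu offset=0x%lx\n", pmd_index, pte_index, page_off);
    return 0;
}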
@@ -301,151 +667,345 @@ static void __init gateway_init(void)
 	 * page 0), so it doesn't need to be aliased into user space.
 	 */

-	pg_dir = (pgd_t *)swapper_pg_dir;
-
-#if PTRS_PER_PMD == 1
-	pmd_base = (pmd_t *)pg_dir;
-	pmd = pmd_base + ((linux_gateway_page_addr) >> PGDIR_SHIFT);
-#else
-	pmd_base = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE);
-	pgd_val(*(pg_dir + (linux_gateway_page_addr >> PGDIR_SHIFT))) =
-		_PAGE_TABLE | __pa(pmd_base);
-	pmd = pmd_base + ((linux_gateway_page_addr & (PMD_MASK) & (PGDIR_SIZE-1)) >> PMD_SHIFT);
-#endif
-
-	pg_table_base = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-	pmd_val(*pmd) = _PAGE_TABLE | __pa(pg_table_base);
-
-	pte = __mk_pte(__pa(&linux_gateway_page), PAGE_GATEWAY);
-	pg_table = pg_table_base + ((linux_gateway_page_addr & (PAGE_MASK) & (PMD_SIZE-1)) >> PAGE_SHIFT);
-	set_pte(pg_table, pte);
-
-	/*
-	 * Setup HP-UX gateway page.
-	 * This page will be aliased into each user address space.
-	 */
-	pg_table_base = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-	pte = __mk_pte(__pa(&hpux_gateway_page), PAGE_GATEWAY);
-	pg_table = pg_table_base + ((hpux_gateway_page_addr & (PAGE_MASK) & (PMD_SIZE-1)) >> PAGE_SHIFT);
-	set_pte(pg_table, pte);
-
-#if PTRS_PER_PMD == 1
-	pmd_base = (pmd_t *)pg_table_base;
-#else
-	pmd_base = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE);
-	pmd = pmd_base + ((hpux_gateway_page_addr & (PMD_MASK) & (PGDIR_SIZE-1)) >> PMD_SHIFT);
-	pmd_val(*pmd) = _PAGE_TABLE | __pa(pg_table_base);
-#endif
-	gateway_pgd_entry = _PAGE_TABLE | __pa(pmd_base);
-
-	/*
-	 * We will be aliasing the HP-UX gateway page into all HP-UX
-	 * user spaces at the same address (not counting the space register
-	 * value) that will be equivalently mapped as long as space register
-	 * hashing is disabled. It will be a problem if anyone touches
-	 * the gateway pages at its "kernel" address, since that is
-	 * NOT equivalently mapped. We'll flush the caches at this
-	 * point, just in case some code has touched those addresses
-	 * previous to this, but all bets are off if they get touched
-	 * after this point.
-	 */
-	flush_all_caches();
-
-	return;
-}
+	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
+		PAGE_SIZE, PAGE_GATEWAY);
+}
+
+void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
+{
+	pgd_t *pg_dir;
+	pmd_t *pmd;
+	pte_t *pg_table;
+	unsigned long start_pmd;
+	unsigned long start_pte;
+	unsigned long address;
+	unsigned long hpux_gw_page_addr;
+	/* FIXME: This is 'const' in order to trick the compiler
+	   into not treating it as DP-relative data. */
+	extern void * const hpux_gateway_page;
+
+	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;
+
+	/*
+	 * Setup HP-UX Gateway page.
+	 *
+	 * The HP-UX gateway page resides in the user address space,
+	 * so it needs to be aliased into each process.
+	 */
+
+	pg_dir = pgd_offset(mm, hpux_gw_page_addr);
+
+#if PTRS_PER_PMD == 1
+	start_pmd = 0;
+#else
+	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+	address = __pa(&hpux_gateway_page);
+#if PTRS_PER_PMD == 1
+	pmd = (pmd_t *)__pa(pg_dir);
+#else
+	pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));
+
+	/*
+	 * pmd is physical at this point
+	 */
+
+	if (!pmd) {
+		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
+		pmd = (pmd_t *) __pa(pmd);
+	}
+
+	pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
+#endif
+	/* now change pmd to kernel virtual addresses */
+
+	pmd = (pmd_t *)__va(pmd) + start_pmd;
+
+	/*
+	 * pg_table is physical at this point
+	 */
+
+	pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
+	if (!pg_table)
+		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));
+
+	pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) pg_table;
+
+	/* now change pg_table to kernel virtual addresses */
+
+	pg_table = (pte_t *) __va(pg_table) + start_pte;
+	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
+}

 extern void flush_tlb_all_local(void);

 void __init paging_init(void)
 {
 	int i;

 	setup_bootmem();
 	pagetable_init();
 	gateway_init();
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local();

-	{
-		unsigned long zones_size[MAX_NR_ZONES] = { max_pfn/2, max_pfn/2, };
-
-		free_area_init(zones_size);
-	}
+	for (i = 0; i < npmem_ranges; i++) {
+		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };
+
+		zones_size[ZONE_DMA] = pmem_ranges[i].pages;
+		free_area_init_node(i, NODE_DATA(i), NULL, zones_size,
+				(pmem_ranges[i].start_pfn << PAGE_SHIFT), 0);
+	}
+
+#ifdef CONFIG_DISCONTIGMEM
+	/*
+	 * Initialize support for virt_to_page() macro.
+	 *
+	 * Note that MAX_ADDRESS is the largest virtual address that
+	 * we can map. However, since we map all physical memory into
+	 * the kernel address space, it also has an effect on the maximum
+	 * physical address we can map (MAX_ADDRESS - PAGE_OFFSET).
+	 */
+
+	maxchunkmap = MAX_ADDRESS >> CHUNKSHIFT;
+	chunkmap = (unsigned char *)alloc_bootmem(maxchunkmap);
+
+	for (i = 0; i < maxchunkmap; i++)
+		chunkmap[i] = BADCHUNK;
+
+	for (i = 0; i < npmem_ranges; i++) {
+
+		ADJ_NODE_MEM_MAP(i) = NODE_MEM_MAP(i) - pmem_ranges[i].start_pfn;
+		{
+			unsigned long chunk_paddr;
+			unsigned long end_paddr;
+			int chunknum;
+
+			chunk_paddr = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+			end_paddr = chunk_paddr + (pmem_ranges[i].pages << PAGE_SHIFT);
+			chunk_paddr &= CHUNKMASK;
+
+			chunknum = (int)CHUNKNUM(chunk_paddr);
+			while (chunk_paddr < end_paddr) {
+				if (chunknum >= maxchunkmap)
+					goto badchunkmap1;
+				if (chunkmap[chunknum] != BADCHUNK)
+					goto badchunkmap2;
+				chunkmap[chunknum] = (unsigned char)i;
+				chunk_paddr += CHUNKSZ;
+				chunknum++;
+			}
+		}
+	}
+
+	return;
+
+badchunkmap1:
+	panic("paging_init: Physical address exceeds maximum address space!\n");
+badchunkmap2:
+	panic("paging_init: Collision in chunk map array. CHUNKSZ needs to be smaller\n");
+#endif
 }
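Under CONFIG_DISCONTIGMEM, paging_init() fills a byte-per-chunk table (chunkmap) that maps each physical chunk number to the memory range that owns it, so virt_to_page() can route an address to the right mem_map in constant time. A toy user-space sketch of that lookup table, with a made-up chunk size and range layout:

#include <stdio.h>
#include <string.h>

#define CHUNKSHIFT 26                         /* example: 64 MB chunks */
#define CHUNKSZ    (1UL << CHUNKSHIFT)
#define BADCHUNK   0xff
#define MAXCHUNKS  64

struct range { unsigned long start, bytes; }; /* physical range per "node" */

int main(void)
{
    unsigned char chunkmap[MAXCHUNKS];
    struct range ranges[2] = { { 0, 2 * CHUNKSZ }, { 8 * CHUNKSZ, CHUNKSZ } };

    memset(chunkmap, BADCHUNK, sizeof(chunkmap));

    /* Mark every chunk covered by a range with that range's index. */
    for (int i = 0; i < 2; i++) {
        unsigned long paddr = ranges[i].start;
        unsigned long end = paddr + ranges[i].bytes;

        for (; paddr < end; paddr += CHUNKSZ)
            chunkmap[paddr >> CHUNKSHIFT] = (unsigned char)i;
    }

    /* A physical address can now be routed to its owning range in O(1). */
    unsigned long probe = 8 * CHUNKSZ + 12345;
    printf("paddr %#lx belongs to range %d\n", probe, chunkmap[probe >> CHUNKSHIFT]);
    return 0;
}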
-#define NR_SPACE_IDS 8192
-
-static unsigned long space_id[NR_SPACE_IDS / (8 * sizeof(long))];
-static unsigned long space_id_index;
-static unsigned long free_space_ids = NR_SPACE_IDS;
-
-/*
- * XXX: We should probably unfold the set_bit / test_bit / clear_bit
- * locking out of these two functions and have a single spinlock on the
- * space_id data structures.
- *
- * Don't bother. This is all going to be significantly changed in the
- * very near future.
- */
+#ifdef CONFIG_PA20
+
+/*
+ * Currently, all PA20 chips have 18 bit protection id's, which is the
+ * limiting factor (space ids are 32 bits).
+ */
+
+#define NR_SPACE_IDS 262144
+
+#else
+
+/*
+ * Currently we have a one-to-one relationship between space id's and
+ * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
+ * support 15 bit protection id's, so that is the limiting factor.
+ * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
+ * probably not worth the effort for a special case here.
+ */
+
+#define NR_SPACE_IDS 32768
+
+#endif  /* !CONFIG_PA20 */

 #define SPACEID_SHIFT (PAGE_SHIFT + (PT_NLEVELS)*(PAGE_SHIFT - PT_NLEVELS) - 32)
+#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
+#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
+
+static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
+static unsigned long dirty_space_id[SID_ARRAY_SIZE];
+static unsigned long space_id_index;
+static unsigned long free_space_ids = NR_SPACE_IDS - 1;
+static unsigned long dirty_space_ids = 0;
+
+static spinlock_t sid_lock = SPIN_LOCK_UNLOCKED;

 unsigned long alloc_sid(void)
 {
 	unsigned long index;

+	spin_lock(&sid_lock);
+
+	if (free_space_ids == 0) {
+		if (dirty_space_ids != 0) {
+			spin_unlock(&sid_lock);
+			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
+			spin_lock(&sid_lock);
+		}
+		if (free_space_ids == 0)
+			BUG();
+	}
+
 	free_space_ids--;

-	do {
-		index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
-	} while (test_and_set_bit(index, space_id));
+	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
+	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
 	space_id_index = index;

+	spin_unlock(&sid_lock);
+
 	return index << SPACEID_SHIFT;
 }

 void free_sid(unsigned long spaceid)
 {
 	unsigned long index = spaceid >> SPACEID_SHIFT;
-
-	if (index < 0)
-		BUG();
-
-	clear_bit(index, space_id);
-
-	if (space_id_index > index) {
-		space_id_index = index;
-	}
+	unsigned long *dirty_space_offset;
+
+	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
+	index &= (BITS_PER_LONG - 1);
+
+	spin_lock(&sid_lock);
+
+	if (*dirty_space_offset & (1L << index))
+		BUG(); /* attempt to free space id twice */
+
+	*dirty_space_offset |= (1L << index);
+	dirty_space_ids++;
+
+	spin_unlock(&sid_lock);
 }

+#ifdef CONFIG_SMP
+static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
+{
+	int i;
+
+	/* NOTE: sid_lock must be held upon entry */
+
+	*ndirtyptr = dirty_space_ids;
+	if (dirty_space_ids != 0) {
+		for (i = 0; i < SID_ARRAY_SIZE; i++) {
+			dirty_array[i] = dirty_space_id[i];
+			dirty_space_id[i] = 0;
+		}
+		dirty_space_ids = 0;
+	}
+
+	return;
+}
+
+static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
+{
+	int i;
+
+	/* NOTE: sid_lock must be held upon entry */
+
+	if (ndirty != 0) {
+		for (i = 0; i < SID_ARRAY_SIZE; i++) {
+			space_id[i] ^= dirty_array[i];
+		}
+
+		free_space_ids += ndirty;
+		space_id_index = 0;
+	}
+}
+
+#else /* CONFIG_SMP */
+
+static void recycle_sids(void)
+{
+	int i;
+
+	/* NOTE: sid_lock must be held upon entry */
+
+	if (dirty_space_ids != 0) {
+		for (i = 0; i < SID_ARRAY_SIZE; i++) {
+			space_id[i] ^= dirty_space_id[i];
+			dirty_space_id[i] = 0;
+		}
+
+		free_space_ids += dirty_space_ids;
+		dirty_space_ids = 0;
+		space_id_index = 0;
+	}
-	free_space_ids++;
 }
+#endif
+
+/*
+ * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
+ * purged, we can safely reuse the space ids that were released but
+ * not flushed from the tlb.
+ */
+
+#ifdef CONFIG_SMP
+
+static unsigned long recycle_ndirty;
+static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
+static unsigned int recycle_inuse = 0;
+
+void flush_tlb_all(void)
+{
+	int do_recycle;
+
+	do_recycle = 0;
+	spin_lock(&sid_lock);
+	if (dirty_space_ids > RECYCLE_THRESHOLD) {
+		if (recycle_inuse) {
+			BUG();  /* FIXME: Use a semaphore/wait queue here */
+		}
+		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
+		recycle_inuse++;
+		do_recycle++;
+	}
+	spin_unlock(&sid_lock);
+	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
+	flush_tlb_all_local();
+	if (do_recycle) {
+		spin_lock(&sid_lock);
+		recycle_sids(recycle_ndirty, recycle_dirty_array);
+		recycle_inuse = 0;
+		spin_unlock(&sid_lock);
+	}
+}
+#else
+void flush_tlb_all(void)
+{
+	spin_lock(&sid_lock);
+	flush_tlb_all_local();
+	recycle_sids();
+	spin_unlock(&sid_lock);
+}
+#endif

 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
 #if 0
 	if (start < end)
 		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(mem_map + MAP_NR(start));
-		set_page_count(mem_map + MAP_NR(start), 1);
+		ClearPageReserved(virt_to_page(start));
+		set_page_count(virt_to_page(start), 1);
 		free_page(start);
 		num_physpages++;
 		totalram_pages++;
 	}
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 #endif
 }
 #endif
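The new space-id code keeps two bitmaps: space_id marks IDs currently allocated, and dirty_space_id marks IDs that have been freed but may still be live in the TLB; recycle_sids() XORs the dirty bits back into space_id once a full TLB flush has happened. A user-space sketch of that two-bitmap scheme, with small fixed sizes and no locking (names and sizes are illustrative only):

#include <stdio.h>

#define NR_IDS     64
#define WORD_BITS  (8 * (int)sizeof(unsigned long))
#define NWORDS     (NR_IDS / WORD_BITS + 1)

static unsigned long in_use[NWORDS] = { 1 };   /* bit 0 set: id 0 reserved */
static unsigned long dirty[NWORDS];
static unsigned long free_ids = NR_IDS - 1;
static unsigned long dirty_ids;

static int alloc_id(void)
{
    for (int i = 1; i < NR_IDS; i++) {
        if (!(in_use[i / WORD_BITS] & (1UL << (i % WORD_BITS)))) {
            in_use[i / WORD_BITS] |= 1UL << (i % WORD_BITS);
            free_ids--;
            return i;
        }
    }
    return -1;                                 /* none free */
}

static void free_id(int id)
{
    /* Freed ids only become dirty; they stay allocated until recycle. */
    dirty[id / WORD_BITS] |= 1UL << (id % WORD_BITS);
    dirty_ids++;
}

static void recycle_ids(void)                  /* call after a full TLB flush */
{
    for (int i = 0; i < NWORDS; i++) {
        in_use[i] ^= dirty[i];                 /* clear every dirty bit */
        dirty[i] = 0;
    }
    free_ids += dirty_ids;
    dirty_ids = 0;
}

int main(void)
{
    int a = alloc_id(), b = alloc_id();
    free_id(a);
    recycle_ids();
    printf("allocated %d and %d, %lu ids free after recycle\n", a, b, free_ids);
    return 0;
}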
arch/parisc/mm/ioremap.c (new file, 0 → 100644)

/*
 * arch/parisc/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 */

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

static inline void remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(NULL, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

#if (USE_HPPA_IOREMAP)
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
#endif /* USE_HPPA_IOREMAP */

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)

	unsigned long end = phys_addr + size - 1;

	/* Support EISA addresses */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
		|| (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}

	return (void *)phys_addr;

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
#endif
}

void iounmap(void *addr)
{
#if !(USE_HPPA_IOREMAP)
	return;
#else
	if (addr > high_memory)
		return vfree((void *) (PAGE_MASK & (unsigned long) addr));
#endif
}
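__ioremap() tolerates callers that pass a physical address that is not page aligned: it maps whole pages and then adds the sub-page offset back into the pointer it hands out. A small sketch of just that alignment arithmetic (constants and values here are examples):

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long phys_addr = 0xf4001234UL;   /* not page aligned */
    unsigned long size      = 0x100;

    unsigned long last_addr = phys_addr + size - 1;
    unsigned long offset    = phys_addr & ~PAGE_MASK;   /* sub-page part */

    phys_addr &= PAGE_MASK;                             /* map whole pages */
    size = PAGE_ALIGN(last_addr) - phys_addr;

    /* The caller's pointer is the mapping base plus the saved offset. */
    printf("map %#lx for %#lx bytes, hand back base+%#lx\n",
           phys_addr, size, offset);
    return 0;
}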
arch/parisc/mm/pa11.c (deleted, 100644 → 0)

/* $Id: pa11.c,v 1.1 1999/03/17 01:05:41 pjlahaie Exp $
 *
 * pa11.c: PA 1.1 specific mmu/cache code.
 *
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/sgialib.h>
#include <asm/mmu_context.h>

extern unsigned long mips_tlb_entries;

/* page functions */
void pa11_clear_page(unsigned long page) { }

static void pa11_copy_page(unsigned long to, unsigned long from) { }

/* Cache operations. */
static inline void pa11_flush_cache_all(void) { }

static void pa11_flush_cache_mm(struct mm_struct *mm) { }

static void pa11_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end) { }

static void pa11_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { }

static void pa11_flush_page_to_ram(unsigned long page) { }

static void pa11_flush_cache_sigtramp(unsigned long page) { }

/* TLB operations. */
static inline void pa11_flush_tlb_all(void)
{
	unsigned long flags;
	int entry;

	save_and_cli(flags);
	/* Here we will need to flush all the TLBs */
	restore_flags(flags);
}

static void pa11_flush_tlb_mm(struct mm_struct *mm)
{
	/* This is what the MIPS does.. Is it the right thing for PA-RISC? */
	if (mm == current->mm)
		pa11_flush_tlb_all();
}

static void pa11_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	if (vma == NULL || vma->vm_mm == current->mm)
		pa11_flush_tlb_all();
}

static void pa11_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm == current->mm)
		pa11_flush_tlb_all();
}

static void pa11_load_pgd(unsigned long pg_dir)
{
	unsigned long flags;
	/* We need to do the right thing here */
}

/*
 * Initialize new page directory with pointers to invalid ptes
 */
static void pa11_pgd_init(unsigned long page)
{
	unsigned long dummy1, dummy2;
}

static void pa11_update_mmu_cache(struct vm_area_struct *vma,
				  unsigned long address, pte_t pte)
{
	pa11_flush_tlb_page(vma, address);
}

static void pa11_show_regs(struct pt_regs *regs)
{
	/*
	 * Saved main processor registers
	 */
	printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       0, (unsigned long)regs->regs[1], (unsigned long)regs->regs[2],
	       (unsigned long)regs->regs[3], (unsigned long)regs->regs[4],
	       (unsigned long)regs->regs[5], (unsigned long)regs->regs[6],
	       (unsigned long)regs->regs[7]);
	printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       (unsigned long)regs->regs[8], (unsigned long)regs->regs[9],
	       (unsigned long)regs->regs[10], (unsigned long)regs->regs[11],
	       (unsigned long)regs->regs[12], (unsigned long)regs->regs[13],
	       (unsigned long)regs->regs[14], (unsigned long)regs->regs[15]);
	printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       (unsigned long)regs->regs[16], (unsigned long)regs->regs[17],
	       (unsigned long)regs->regs[18], (unsigned long)regs->regs[19],
	       (unsigned long)regs->regs[20], (unsigned long)regs->regs[21],
	       (unsigned long)regs->regs[22], (unsigned long)regs->regs[23]);
	printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       (unsigned long)regs->regs[24], (unsigned long)regs->regs[25],
	       (unsigned long)regs->regs[28], (unsigned long)regs->regs[29],
	       (unsigned long)regs->regs[30], (unsigned long)regs->regs[31]);

	/*
	 * Saved cp0 registers
	 */
	printk("epc  : %08lx %s\nStatus: %08x\nCause : %08x\n",
	       (unsigned long)regs->cp0_epc, print_tainted(),
	       (unsigned int)regs->cp0_status, (unsigned int)regs->cp0_cause);
}

static int pa11_user_mode(struct pt_regs *regs)
{
	/* Return user mode stuff?? */
}

__initfunc(void ld_mmu_pa11(void))
{
	/* Taken directly from the MIPS arch.. Lots of bad things here */
	clear_page = pa11_clear_page;
	copy_page = pa11_copy_page;

	flush_cache_all = pa11_flush_cache_all;
	flush_cache_mm = pa11_flush_cache_mm;
	flush_cache_range = pa11_flush_cache_range;
	flush_cache_page = pa11_flush_cache_page;
	flush_cache_sigtramp = pa11_flush_cache_sigtramp;
	flush_page_to_ram = pa11_flush_page_to_ram;

	flush_tlb_all = pa11_flush_tlb_all;
	flush_tlb_mm = pa11_flush_tlb_mm;
	flush_tlb_range = pa11_flush_tlb_range;
	flush_tlb_page = pa11_flush_tlb_page;
	pa11_asid_setup();

	load_pgd = pa11_load_pgd;
	pgd_init = pa11_pgd_init;
	update_mmu_cache = pa11_update_mmu_cache;

	show_regs = pa11_show_regs;

	add_wired_entry = pa11_add_wired_entry;

	user_mode = pa11_user_mode;

	flush_tlb_all();
}
arch/parisc/mm/pa20.c (deleted, 100644 → 0)

/* $Id: pa20.c,v 1.1 1999/03/17 01:05:41 pjlahaie Exp $
 *
 * pa20.c: PA 2.0 specific mmu/cache code.
 *
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/sgialib.h>
#include <asm/mmu_context.h>

extern unsigned long mips_tlb_entries;

/* page functions */
void pa20_clear_page(unsigned long page) { }

static void pa20_copy_page(unsigned long to, unsigned long from) { }

/* Cache operations. */
static inline void pa20_flush_cache_all(void) { }

static void pa20_flush_cache_mm(struct mm_struct *mm) { }

static void pa20_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end) { }

static void pa20_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { }

static void pa20_flush_page_to_ram(unsigned long page) { }

static void pa20_flush_cache_sigtramp(unsigned long page) { }

/* TLB operations. */
static inline void pa20_flush_tlb_all(void)
{
	unsigned long flags;
	int entry;

	save_and_cli(flags);
	/* Here we will need to flush all the TLBs */
	restore_flags(flags);
}

static void pa20_flush_tlb_mm(struct mm_struct *mm)
{
	/* This is what the MIPS does.. Is it the right thing for PA-RISC? */
	if (mm == current->mm)
		pa20_flush_tlb_all();
}

static void pa20_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	if (vma == NULL || vma->vm_mm == current->mm)
		pa20_flush_tlb_all();
}

static void pa20_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm == current->mm)
		pa20_flush_tlb_all();
}

static void pa20_load_pgd(unsigned long pg_dir)
{
	unsigned long flags;
	/* We need to do the right thing here */
}

/*
 * Initialize new page directory with pointers to invalid ptes
 */
static void pa20_pgd_init(unsigned long page)
{
	unsigned long dummy1, dummy2;
}

static void pa20_update_mmu_cache(struct vm_area_struct *vma,
				  unsigned long address, pte_t pte)
{
	pa20_flush_tlb_page(vma, address);
}

static void pa20_show_regs(struct pt_regs *regs)
{
	/*
	 * Saved main processor registers
	 */
	printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       0, (unsigned long)regs->regs[1], (unsigned long)regs->regs[2],
	       (unsigned long)regs->regs[3], (unsigned long)regs->regs[4],
	       (unsigned long)regs->regs[5], (unsigned long)regs->regs[6],
	       (unsigned long)regs->regs[7]);
	printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       (unsigned long)regs->regs[8], (unsigned long)regs->regs[9],
	       (unsigned long)regs->regs[10], (unsigned long)regs->regs[11],
	       (unsigned long)regs->regs[12], (unsigned long)regs->regs[13],
	       (unsigned long)regs->regs[14], (unsigned long)regs->regs[15]);
	printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       (unsigned long)regs->regs[16], (unsigned long)regs->regs[17],
	       (unsigned long)regs->regs[18], (unsigned long)regs->regs[19],
	       (unsigned long)regs->regs[20], (unsigned long)regs->regs[21],
	       (unsigned long)regs->regs[22], (unsigned long)regs->regs[23]);
	printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n",
	       (unsigned long)regs->regs[24], (unsigned long)regs->regs[25],
	       (unsigned long)regs->regs[28], (unsigned long)regs->regs[29],
	       (unsigned long)regs->regs[30], (unsigned long)regs->regs[31]);

	/*
	 * Saved cp0 registers
	 */
	printk("epc  : %08lx %s\nStatus: %08x\nCause : %08x\n",
	       (unsigned long)regs->cp0_epc, print_tainted(),
	       (unsigned int)regs->cp0_status, (unsigned int)regs->cp0_cause);
}

static int pa20_user_mode(struct pt_regs *regs)
{
	/* Return user mode stuff?? */
}

__initfunc(void ld_mmu_pa20(void))
{
	/* Taken directly from the MIPS arch.. Lots of bad things here */
	clear_page = pa20_clear_page;
	copy_page = pa20_copy_page;

	flush_cache_all = pa20_flush_cache_all;
	flush_cache_mm = pa20_flush_cache_mm;
	flush_cache_range = pa20_flush_cache_range;
	flush_cache_page = pa20_flush_cache_page;
	flush_cache_sigtramp = pa20_flush_cache_sigtramp;
	flush_page_to_ram = pa20_flush_page_to_ram;

	flush_tlb_all = pa20_flush_tlb_all;
	flush_tlb_mm = pa20_flush_tlb_mm;
	flush_tlb_range = pa20_flush_tlb_range;
	flush_tlb_page = pa20_flush_tlb_page;
	pa20_asid_setup();

	load_pgd = pa20_load_pgd;
	pgd_init = pa20_pgd_init;
	update_mmu_cache = pa20_update_mmu_cache;

	show_regs = pa20_show_regs;

	add_wired_entry = pa20_add_wired_entry;

	user_mode = pa20_user_mode;

	flush_tlb_all();
}