nexedi / linux · Commits

Commit 9d912132, authored May 22, 2002 by Linus Torvalds
Manual merge fixup
Parents: d33fb4fe, dee4f8ff
Showing 8 changed files with 115 additions and 311 deletions (+115, -311)
arch/m68k/mm/init.c                   +1   -48
arch/m68k/mm/kmap.c                   +4   -4
arch/m68k/mm/memory.c                 +0   -63
arch/m68k/mm/motorola.c               +3   -5
arch/m68k/mm/sun3mmu.c                +0   -2
include/asm-m68k/motorola_pgalloc.h   +48  -112
include/asm-m68k/sun3_pgalloc.h       +40  -77
include/asm-m68k/tlb.h                +19  -0
arch/m68k/mm/init.c  (view file @ 9d912132)

@@ -35,20 +35,6 @@
 mmu_gather_t mmu_gathers[NR_CPUS];
 
-int do_check_pgt_cache(int low, int high)
-{
-        int freed = 0;
-
-        if(pgtable_cache_size > high) {
-                do {
-                        if(pmd_quicklist)
-                                freed += free_pmd_slow(get_pmd_fast());
-                        if(pte_quicklist)
-                                free_pte_slow(get_pte_fast()), freed++;
-                } while(pgtable_cache_size > low);
-        }
-        return freed;
-}
-
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -56,27 +42,9 @@
  * for a process dying in kernel mode, possibly leaving an inode
  * unused etc..
  *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
  * ZERO_PAGE is a special page that is used for zero-initialized
  * data and COW.
  */
-unsigned long empty_bad_page_table;
-
-pte_t *__bad_pagetable(void)
-{
-        memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
-        return (pte_t *)empty_bad_page_table;
-}
-
-unsigned long empty_bad_page;
-
-pte_t __bad_page(void)
-{
-        memset((void *)empty_bad_page, 0, PAGE_SIZE);
-        return pte_mkdirty(__mk_pte(empty_bad_page, PAGE_SHARED));
-}
-
 unsigned long empty_zero_page;
 
@@ -106,7 +74,6 @@ void show_mem(void)
         printk("%d reserved pages\n",reserved);
         printk("%d pages shared\n",shared);
         printk("%d pages swap cached\n",cached);
-        printk("%ld pages in page table cache\n",pgtable_cache_size);
 }
 
 extern void init_pointer_table(unsigned long ptable);
@@ -126,7 +93,7 @@ void __init mem_init(void)
         unsigned long tmp;
         int i;
 
-        max_mapnr = num_physpages = MAP_NR(high_memory);
+        max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);
 
 #ifdef CONFIG_ATARI
         if (MACH_IS_ATARI)
@@ -137,12 +104,6 @@ void __init mem_init(void)
         totalram_pages = free_all_bootmem();
 
         for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
-#if 0
-#ifndef CONFIG_SUN3
-                if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
-                        clear_bit(PG_DMA, &virt_to_page(tmp)->flags);
-#endif
-#endif
                 if (PageReserved(virt_to_page(tmp))) {
                         if (tmp >= (unsigned long)&_text
                             && tmp < (unsigned long)&_etext)
@@ -154,14 +115,6 @@ void __init mem_init(void)
                                 datapages++;
                         continue;
                 }
-#if 0
-                set_page_count(virt_to_page(tmp), 1);
-#ifdef CONFIG_BLK_DEV_INITRD
-                if (!initrd_start ||
-                    (tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
-#endif
-                        free_page(tmp);
-#endif
         }
 
 #ifndef CONFIG_SUN3
arch/m68k/mm/kmap.c  (view file @ 9d912132)

@@ -189,7 +189,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
                printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
 #endif
                pgd_dir = pgd_offset_k(virtaddr);
-               pmd_dir = pmd_alloc_kernel(pgd_dir, virtaddr);
+               pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
                if (!pmd_dir) {
                        printk("ioremap: no mem for pmd_dir\n");
                        return NULL;
@@ -201,7 +201,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
-                       pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
+                       pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                return NULL;
@@ -273,7 +273,7 @@ void __iounmap(void *addr, unsigned long size)
                        pmd_clear(pmd_dir);
                        return;
                }
-               pte_dir = pte_offset(pmd_dir, virtaddr);
+               pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
                pte_val(*pte_dir) = 0;
 
                virtaddr += PAGE_SIZE;
@@ -350,7 +350,7 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
                        pmd_clear(pmd_dir);
                        return;
                }
-               pte_dir = pte_offset(pmd_dir, virtaddr);
+               pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
                pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
 
                virtaddr += PAGE_SIZE;
arch/m68k/mm/memory.c  (view file @ 9d912132)

@@ -25,69 +25,6 @@
 #include <asm/amigahw.h>
 #endif
 
-struct pgtable_cache_struct quicklists;
-
-void __bad_pte(pmd_t *pmd)
-{
-        printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
-        pmd_set(pmd, BAD_PAGETABLE);
-}
-
-void __bad_pmd(pgd_t *pgd)
-{
-        printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
-        pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
-}
-
-#if 0
-pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
-{
-        pte_t *pte;
-        pte = (pte_t *) __get_free_page(GFP_KERNEL);
-        if (pmd_none(*pmd)) {
-                if (pte) {
-                        clear_page(pte);
-                        __flush_page_to_ram((unsigned long)pte);
-                        flush_tlb_kernel_page((unsigned long)pte);
-                        nocache_page((unsigned long)pte);
-                        pmd_set(pmd, pte);
-                        return pte + offset;
-                }
-                pmd_set(pmd, BAD_PAGETABLE);
-                return NULL;
-        }
-        free_page((unsigned long)pte);
-        if (pmd_bad(*pmd)) {
-                __bad_pte(pmd);
-                return NULL;
-        }
-        return (pte_t *)__pmd_page(*pmd) + offset;
-}
-#endif
-
-#if 0
-pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
-{
-        pmd_t *pmd;
-        pmd = get_pointer_table();
-        if (pgd_none(*pgd)) {
-                if (pmd) {
-                        pgd_set(pgd, pmd);
-                        return pmd + offset;
-                }
-                pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
-                return NULL;
-        }
-        free_pointer_table(pmd);
-        if (pgd_bad(*pgd)) {
-                __bad_pmd(pgd);
-                return NULL;
-        }
-        return (pmd_t *)__pgd_page(*pgd) + offset;
-}
-#endif
-
 /* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
    struct page instead of separately kmalloced struct.  Stolen from
arch/m68k/mm/motorola.c  (view file @ 9d912132)

@@ -176,7 +176,7 @@ map_chunk (unsigned long addr, long size)
                        pte_dir = kernel_page_table();
                        pmd_set(pmd_dir, pte_dir);
                }
-               pte_dir = pte_offset(pmd_dir, virtaddr);
+               pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
 
                if (virtaddr) {
                        if (!pte_present(*pte_dir))
@@ -262,15 +262,13 @@ void __init paging_init(void)
         * initialize the bad page table and bad page to point
         * to a couple of allocated pages
         */
-       empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
        /*
-        * Set up SFC/DFC registers (user data space)
+        * Set up SFC/DFC registers
         */
-       set_fs(USER_DS);
+       set_fs(KERNEL_DS);
 
 #ifdef DEBUG
        printk ("before free_area_init\n");
arch/m68k/mm/sun3mmu.c  (view file @ 9d912132)

@@ -59,8 +59,6 @@ void __init paging_init(void)
 #ifdef TEST_VERIFY_AREA
        wp_works_ok = 0;
 #endif
-       empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
include/asm-m68k/motorola_pgalloc.h  (view file @ 9d912132)

 #ifndef _MOTOROLA_PGALLOC_H
 #define _MOTOROLA_PGALLOC_H
 
-#include <asm/tlbflush.h>
+#include <asm/tlb.h>
 
-extern struct pgtable_cache_struct {
-        unsigned long *pmd_cache;
-        unsigned long *pte_cache;
-/* This counts in units of pointer tables, of which can be eight per page. */
-        unsigned long pgtable_cache_sz;
-} quicklists;
-
-#define pgd_quicklist ((unsigned long *)0)
-#define pmd_quicklist (quicklists.pmd_cache)
-#define pte_quicklist (quicklists.pte_cache)
-/* This isn't accurate because of fragmentation of allocated pages for
-   pointer tables, but that should not be a problem. */
-#define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)
-
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
-extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);
-
 extern pmd_t *get_pointer_table(void);
 extern int free_pointer_table(pmd_t *);
 
-extern inline pte_t *get_pte_fast(void)
-{
-        unsigned long *ret;
-
-        ret = pte_quicklist;
-        if (ret) {
-                pte_quicklist = (unsigned long *)*ret;
-                ret[0] = 0;
-                quicklists.pgtable_cache_sz -= 8;
-        }
-        return (pte_t *)ret;
-}
-#define pte_alloc_one_fast(mm,addr) get_pte_fast()
-
-static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        pte_t *pte;
 
        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pte) {
                clear_page(pte);
                __flush_page_to_ram((unsigned long)pte);
                flush_tlb_kernel_page((unsigned long)pte);
                nocache_page((unsigned long)pte);
        }
 
        return pte;
 }
 
-extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-        return get_pointer_table();
-}
-
-extern inline void free_pte_fast(pte_t *pte)
-{
-        *(unsigned long *)pte = (unsigned long)pte_quicklist;
-        pte_quicklist = (unsigned long *)pte;
-        quicklists.pgtable_cache_sz += 8;
-}
-
-extern inline void free_pte_slow(pte_t *pte)
+static inline void pte_free_kernel(pte_t *pte)
 {
        cache_page((unsigned long)pte);
        free_page((unsigned long)pte);
 }
 
-extern inline pmd_t *get_pmd_fast(void)
-{
-        unsigned long *ret;
-
-        ret = pmd_quicklist;
-        if (ret) {
-                pmd_quicklist = (unsigned long *)*ret;
-                ret[0] = 0;
-                quicklists.pgtable_cache_sz--;
-        }
-        return (pmd_t *)ret;
-}
-#define pmd_alloc_one_fast(mm,addr) get_pmd_fast()
-
-extern inline void free_pmd_fast(pmd_t *pmd)
-{
-        *(unsigned long *)pmd = (unsigned long)pmd_quicklist;
-        pmd_quicklist = (unsigned long *)pmd;
-        quicklists.pgtable_cache_sz++;
-}
-
-extern inline int free_pmd_slow(pmd_t *pmd)
-{
-        return free_pointer_table(pmd);
-}
-
-/* The pgd cache is folded into the pmd cache, so these are dummy routines. */
-extern inline pgd_t *get_pgd_fast(void)
-{
-        return (pgd_t *)0;
-}
-
-extern inline void free_pgd_fast(pgd_t *pgd)
-{
-}
-
-extern inline void free_pgd_slow(pgd_t *pgd)
-{
-}
-
-extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pmd(pgd_t *pgd);
-
-extern inline void pte_free(pte_t *pte)
-{
-        free_pte_fast(pte);
-}
-
-extern inline void pmd_free(pmd_t *pmd)
-{
-        free_pmd_fast(pmd);
-}
-
-extern inline void pte_free_kernel(pte_t *pte)
-{
-        free_pte_fast(pte);
-}
-
-extern inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
-{
-        return pte_alloc(&init_mm, pmd, address);
-}
-
-extern inline void pmd_free_kernel(pmd_t *pmd)
-{
-        free_pmd_fast(pmd);
-}
-
-extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
-{
-        return pmd_alloc(&init_mm, pgd, address);
-}
-
-extern inline void pgd_free(pgd_t *pgd)
-{
-        free_pmd_fast((pmd_t *)pgd);
-}
-
-extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-        pgd_t *pgd = (pgd_t *)get_pmd_fast();
-        if (!pgd)
-                pgd = (pgd_t *)get_pointer_table();
-        return pgd;
-}
-
-#define pmd_populate(MM, PMD, PTE)  pmd_set(PMD, PTE)
-#define pgd_populate(MM, PGD, PMD)  pgd_set(PGD, PMD)
-
-extern int do_check_pgt_cache(int, int);
-
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
-}
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
+        pte_t *pte;
+
+        if(!page)
+                return NULL;
+
+        pte = kmap(page);
+        if (pte) {
+                clear_page(pte);
+                __flush_page_to_ram((unsigned long)pte);
+                flush_tlb_kernel_page((unsigned long)pte);
+                nocache_page((unsigned long)pte);
+        }
+        kunmap(pte);
+
+        return page;
+}
+
+static inline void pte_free(struct page *page)
+{
+        cache_page((unsigned long)kmap(page));
+        kunmap(page);
+        __free_page(page);
+}
+
+static inline void pte_free_tlb(mmu_gather_t *tlb, struct page *page)
+{
+        cache_page((unsigned long)kmap(page));
+        kunmap(page);
+        __free_page(page);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        return get_pointer_table();
+}
+
+static inline int pmd_free(pmd_t *pmd)
+{
+        return free_pointer_table(pmd);
+}
+
+static inline int pmd_free_tlb(mmu_gather_t *tlb, pmd_t *pmd)
+{
+        return free_pointer_table(pmd);
+}
+
+static inline void pgd_free(pgd_t *pgd)
+{
+        pmd_free((pmd_t *)pgd);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        return (pgd_t *)get_pointer_table();
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+        pmd_set(pmd, pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+{
+        pmd_set(pmd, page_address(page));
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+        pgd_set(pgd, pmd);
+}
+
+#define check_pgt_cache()  do { } while (0)
 
 #endif /* _MOTOROLA_PGALLOC_H */
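
The new header drops the arch-private quicklist fast/slow helpers in favour of an allocate-then-populate split: the architecture only hands out page-table pages (pte_alloc_one / pmd_alloc_one) and installs them (pmd_populate / pgd_populate), while generic code decides when allocation happens. Below is a stand-alone, user-space toy model of that split, not kernel code; the toy_* names, sizes, and types are invented purely for illustration.

/* toy model: a "pmd" is an array of slots, each pointing to a "pte page" */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOTS_PER_PMD 8
#define PTES_PER_PAGE 16

typedef struct { long entries[PTES_PER_PAGE]; } pte_page;    /* stands in for a pte table page */
typedef struct { pte_page *slot[SLOTS_PER_PMD]; } pmd_table; /* stands in for a pmd            */

/* arch hook: hand back a zeroed pte page (the pte_alloc_one() role) */
static pte_page *toy_pte_alloc_one(void)
{
        pte_page *p = malloc(sizeof(*p));
        if (p)
                memset(p, 0, sizeof(*p));
        return p;
}

/* arch hook: install an allocated pte page into a pmd slot (the pmd_populate() role) */
static void toy_pmd_populate(pmd_table *pmd, int slot, pte_page *p)
{
        pmd->slot[slot] = p;
}

/* generic-code side: allocate on demand, then populate; no per-arch quicklist involved */
static pte_page *toy_pte_alloc(pmd_table *pmd, int slot)
{
        if (!pmd->slot[slot]) {
                pte_page *p = toy_pte_alloc_one();
                if (!p)
                        return NULL;
                toy_pmd_populate(pmd, slot, p);
        }
        return pmd->slot[slot];
}

int main(void)
{
        pmd_table pmd = { { 0 } };
        pte_page *p = toy_pte_alloc(&pmd, 3);

        printf("slot 3 populated: %s\n", p ? "yes" : "no");
        free(p);
        return 0;
}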
include/asm-m68k/sun3_pgalloc.h  (view file @ 9d912132)

 /* sun3_pgalloc.h --
  * reorganization around 2.3.39, routines moved from sun3_pgtable.h
  *
+ * 02/27/2002 -- Modified to support "highpte" implementation in 2.5.5 (Sam)
+ *
  * moved 1/26/2000 Sam Creasey
  */
 
 #ifndef _SUN3_PGALLOC_H
 #define _SUN3_PGALLOC_H
 
-/* Pagetable caches. */
-//todo: should implement for at least ptes. --m
-#define pgd_quicklist ((unsigned long *) 0)
-#define pmd_quicklist ((unsigned long *) 0)
-#define pte_quicklist ((unsigned long *) 0)
-#define pgtable_cache_size (0L)
-
-/* Allocation and deallocation of various flavours of pagetables. */
-extern inline int free_pmd_fast (pmd_t *pmdp) { return 0; }
-extern inline int free_pmd_slow (pmd_t *pmdp) { return 0; }
-extern inline pmd_t *get_pmd_fast (void) { return (pmd_t *) 0; }
-
-//todo: implement the following properly.
-#define get_pte_fast() ((pte_t *) 0)
-#define get_pte_slow pte_alloc
-#define free_pte_fast(pte)
-#define free_pte_slow pte_free
+#include <asm/tlb.h>
 
 /* FIXME - when we get this compiling */
 /* erm, now that it's compiling, what do we do with it? */
 
 #define _KERNPG_TABLE 0
 
-extern inline void pte_free_kernel(pte_t * pte)
-{
-        free_page((unsigned long) pte);
-}
-
 extern const char bad_pmd_string[];
 
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
-        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-        if (pmd_none(*pmd)) {
-                pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
-                if (pmd_none(*pmd)) {
-                        if (page) {
-                                pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
-                                return page + address;
-                        }
-                        pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
-                        return NULL;
-                }
-                free_page((unsigned long) page);
-        }
-        if (pmd_bad(*pmd)) {
-                printk(bad_pmd_string, pmd_val(*pmd));
-                printk("at kernel pgd off %08x\n", (unsigned int) pmd);
-                pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
-                return NULL;
-        }
-        return (pte_t *) __pmd_page(*pmd) + address;
-}
+#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
 
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern inline void pmd_free_kernel(pmd_t * pmd)
+static inline void pte_free_kernel(pte_t * pte)
 {
-//      pmd_val(*pmd) = 0;
+        free_page((unsigned long) pte);
 }
 
-extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
+static inline void pte_free(struct page *page)
 {
-        return (pmd_t *) pgd;
+        __free_page(page);
 }
 
-#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
-#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-
-extern inline void pte_free(pte_t * pte)
+static inline void pte_free_tlb(mmu_gather_t *tlb, struct page *page)
 {
-        free_page((unsigned long) pte);
+        tlb_remove_page(tlb, page);
 }
 
-static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        unsigned long page = __get_free_page(GFP_KERNEL);
 
@@ -90,30 +45,45 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
                return NULL;
 
        memset((void *)page, 0, PAGE_SIZE);
-//      pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
-/*      pmd_val(*pmd) = __pa(page); */
        return (pte_t *) (page);
 }
 
-#define pte_alloc_one_fast(mm,addr) pte_alloc_one(mm,addr)
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
+
+        if (page == NULL)
+                return NULL;
+
+        clear_highpage(page);
+        return page;
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+        pmd_val(*pmd) = __pa((unsigned long)pte);
+}
 
-#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = __pa((unsigned long)pte))
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+{
+        pmd_val(*pmd) = __pa((unsigned long)page_address(page));
+}
 
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
  */
-extern inline void pmd_free(pmd_t * pmd)
-{
-        pmd_val(*pmd) = 0;
-}
+#define pmd_free(x)            do { } while (0)
+#define pmd_free_tlb(tlb, x)   do { } while (0)
 
-extern inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(pgd_t * pgd)
 {
        free_page((unsigned long) pgd);
 }
 
-extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
+static inline pgd_t * pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *new_pgd;
 
@@ -125,14 +95,6 @@ extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
 #define pgd_populate(mm, pmd, pte) BUG()
 
-/* FIXME: the sun3 doesn't have a page table cache!
-   (but the motorola routine should just return 0) */
-extern int do_check_pgt_cache(int, int);
-
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
-}
-
 /* Reserved PMEGs. */
 extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
 
@@ -141,5 +103,6 @@ extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
 extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
 
+#define check_pgt_cache()   do { } while (0)
 
 #endif /* SUN3_PGALLOC_H */
include/asm-m68k/tlb.h  (view file @ 9d912132)

+#ifndef _M68K_TLB_H
+#define _M68K_TLB_H
+
+/*
+ * m68k doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma)   do { } while (0)
+#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it
+ * fills up.
+ */
+#define tlb_flush(tlb)  flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* _M68K_TLB_H */
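
This new header wires m68k into <asm-generic/tlb.h>: the per-vma hooks are no-ops and tlb_flush() just flushes the whole mm, while the generic mmu_gather code batches the pages queued for freeing (what pte_free_tlb()/pmd_free_tlb() feed into) and only releases them after the flush. Below is a stand-alone, user-space toy model of that batching idea, not kernel code; the toy_* names and the batch size are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define GATHER_BATCH 8

/* toy mmu_gather: queue pages, flush once, then free the whole batch */
struct toy_gather {
        void *pages[GATHER_BATCH];
        int nr;
};

static void toy_tlb_flush(struct toy_gather *tlb)
{
        /* stands in for tlb_flush(): invalidate stale translations before reuse */
        printf("flush (%d pages queued)\n", tlb->nr);
}

static void toy_tlb_finish(struct toy_gather *tlb)
{
        toy_tlb_flush(tlb);
        for (int i = 0; i < tlb->nr; i++)
                free(tlb->pages[i]);   /* only after the flush is the memory safe to reuse */
        tlb->nr = 0;
}

/* stands in for tlb_remove_page(): defer the free, flush early if the batch fills up */
static void toy_tlb_remove_page(struct toy_gather *tlb, void *page)
{
        tlb->pages[tlb->nr++] = page;
        if (tlb->nr == GATHER_BATCH)
                toy_tlb_finish(tlb);
}

int main(void)
{
        struct toy_gather tlb = { .nr = 0 };

        for (int i = 0; i < 10; i++)
                toy_tlb_remove_page(&tlb, malloc(64));
        toy_tlb_finish(&tlb);
        return 0;
}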