nexedi / linux
Commit dad0dc2e
authored Jul 11, 2002 by David Gibson
Committed by Paul Mackerras, Jul 11, 2002
PPC32: clean up the initial mapping of RAM, allow for large-page mappings.
parent a321a55f
Showing 4 changed files with 34 additions and 26 deletions
arch/ppc/mm/mmu_decl.h       +3  -1
arch/ppc/mm/pgtable.c        +7  -24
arch/ppc/mm/ppc_mmu.c        +7  -1
include/asm-ppc/pgtable.h    +17 -0
arch/ppc/mm/mmu_decl.h

@@ -25,7 +25,6 @@
 #include <asm/tlbflush.h>
 
 extern void mapin_ram(void);
-extern void bat_mapin_ram(void);
 extern int map_page(unsigned long va, unsigned long pa, int flags);
 extern void setbat(int index, unsigned long virt, unsigned long phys,
                    unsigned int size, int flags);
@@ -49,14 +48,17 @@ extern unsigned long Hash_size, Hash_mask;
 #if defined(CONFIG_8xx)
 #define flush_HPTE(X, va, pg)   _tlbie(va)
 #define MMU_init_hw()           do { } while(0)
+#define mmu_mapin_ram()         (0UL)
 #elif defined(CONFIG_4xx)
 #define flush_HPTE(X, va, pg)   _tlbie(va)
 extern void MMU_init_hw(void);
+#define mmu_mapin_ram()         (0UL)
 #else
 /* anything except 4xx or 8xx */
 extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
 /* Be careful....this needs to be updated if we ever encounter 603 SMPs,
  * which includes all new 82xx processors.  We need tlbie/tlbsync here
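Taken together, these two hunks give every 32-bit PPC flavour an mmu_mapin_ram() that the generic code can call unconditionally: on 8xx and 4xx it is a stub reporting 0 bytes mapped, while on classic hash-MMU parts it is a real function (the renamed BAT-mapping routine in arch/ppc/mm/ppc_mmu.c below) that returns how much of low memory it has already covered with large mappings.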
arch/ppc/mm/pgtable.c

@@ -252,31 +252,14 @@ void __init mapin_ram(void)
 {
         unsigned long v, p, s, f;
 
-#ifdef HAVE_BATS
-        if (!__map_without_bats)
-                bat_mapin_ram();
-#endif /* HAVE_BATS */
-
-        v = KERNELBASE;
-        p = PPC_MEMSTART;
-        for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
-                /* On the MPC8xx, we want the page shared so we
-                 * don't get ASID compares on kernel space.
-                 */
-                f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
-                /* Allows stub to set breakpoints everywhere */
-                f |= _PAGE_WRENABLE;
-#else   /* !CONFIG_KGDB && !CONFIG_XMON */
-                if ((char *) v < _stext || (char *) v >= etext)
-                        f |= _PAGE_WRENABLE;
-#ifdef CONFIG_PPC_STD_MMU
+        s = mmu_mapin_ram();
+        v = KERNELBASE + s;
+        p = PPC_MEMSTART + s;
+        for (; s < total_lowmem; s += PAGE_SIZE) {
+                if ((char *) v >= _stext && (char *) v < etext)
+                        f = _PAGE_RAM_TEXT;
                 else
-                        /* On the powerpc (not all), no user access
-                           forces R/W kernel access */
-                        f |= _PAGE_USER;
-#endif /* CONFIG_PPC_STD_MMU */
-#endif /* CONFIG_KGDB || CONFIG_XMON */
+                        f = _PAGE_RAM;
                 map_page(v, p, f);
                 v += PAGE_SIZE;
                 p += PAGE_SIZE;
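Pieced back together from the kept and added lines above, the body of mapin_ram() after this patch reads roughly as follows. This is a sketch only: indentation, blank lines and the closing braces lie outside the hunk and are assumed.

void __init mapin_ram(void)
{
        unsigned long v, p, s, f;

        /* Let the MMU-specific code cover what it can with large mappings
         * (BATs on classic 32-bit PPC); the 4xx/8xx stubs return 0. */
        s = mmu_mapin_ram();
        v = KERNELBASE + s;
        p = PPC_MEMSTART + s;

        /* Map the rest of low memory page by page, using _PAGE_RAM_TEXT
         * to write-protect the kernel text where the config allows it. */
        for (; s < total_lowmem; s += PAGE_SIZE) {
                if ((char *) v >= _stext && (char *) v < etext)
                        f = _PAGE_RAM_TEXT;
                else
                        f = _PAGE_RAM;
                map_page(v, p, f);
                v += PAGE_SIZE;
                p += PAGE_SIZE;
        }
}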
arch/ppc/mm/ppc_mmu.c

@@ -87,12 +87,15 @@ unsigned long p_mapped_by_bats(unsigned long pa)
         return 0;
 }
 
-void __init bat_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(void)
 {
         unsigned long tot, bl, done;
         unsigned long max_size = (256<<20);
         unsigned long align;
 
+        if (__map_without_bats)
+                return 0;
+
         /* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
         /* Make sure we don't map a block larger than the
@@ -119,7 +122,10 @@ void __init bat_mapin_ram(void)
                         break;
                 setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_KERNEL);
+                done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
         }
+
+        return done;
 }
 
 /*
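The returned done is the amount of low memory now covered by BAT mappings, counted from KERNELBASE. As a purely illustrative example (the sizes are invented): if BAT2 covers 256 MB from KERNELBASE and BAT3 a further 64 MB, bat_addrs[3].limit is KERNELBASE + 320 MB - 1, so done comes out as 320 MB and the page-by-page loop in mapin_ram() starts at KERNELBASE + 320 MB. With __map_without_bats set, the function returns 0 and everything is mapped page by page as before.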
include/asm-ppc/pgtable.h

@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define _PAGE_KERNEL    _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 #define _PAGE_IO        _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
 
+#define _PAGE_RAM       _PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT  _PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT  (_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT  (_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif
+
 #define PAGE_NONE       __pgprot(_PAGE_BASE)
 #define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
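The effect of this #if ladder is easiest to see in isolation. The stand-alone sketch below mirrors the selection with made-up flag values (they are not the real asm-ppc bit assignments, and the nested #ifdef is flattened to #elif); it only illustrates the three outcomes: kernel text left writable on debugger builds, write-protected with user access forced on the standard hash MMU, and simply write-protected elsewhere.

/* Illustration only: hypothetical flag bits, not the real asm-ppc values. */
#include <stdio.h>

#define _PAGE_USER      0x001
#define _PAGE_WRENABLE  0x002
#define _PAGE_KERNEL    (0x100 | _PAGE_WRENABLE)   /* stand-in definition */
#define _PAGE_RAM       _PAGE_KERNEL

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
/* debugger builds: keep kernel text writable so breakpoints can be patched in */
#define _PAGE_RAM_TEXT  _PAGE_RAM
#elif defined(CONFIG_PPC_STD_MMU)
/* hash MMU: dropping write permission requires granting user access */
#define _PAGE_RAM_TEXT  ((_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER)
#else
#define _PAGE_RAM_TEXT  (_PAGE_RAM & ~_PAGE_WRENABLE)
#endif

int main(void)
{
        printf("_PAGE_RAM      = %#x\n", (unsigned) _PAGE_RAM);
        printf("_PAGE_RAM_TEXT = %#x\n", (unsigned) _PAGE_RAM_TEXT);
        return 0;
}

Compiling this with -DCONFIG_KGDB, -DCONFIG_PPC_STD_MMU, or neither shows the three variants of _PAGE_RAM_TEXT.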